diff --git a/io_anim_bvh/__init__.py b/io_anim_bvh/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5c57d9b66ed6bbde70f50d9bae6f42aabe3038f
--- /dev/null
+++ b/io_anim_bvh/__init__.py
@@ -0,0 +1,137 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "BioVision Motion Capture (BVH) format",
+    "author": "Campbell Barton",
+    "location": "File > Import-Export",
+    "description": "Import-Export BVH from armature objects",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/MotionCapture_BVH",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "import_bvh" in locals():
+        imp.reload(import_bvh)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ImportHelper, ExportHelper
+
+
+class ImportBVH(bpy.types.Operator, ImportHelper):
+    '''Load a BVH motion capture file'''
+    bl_idname = "import_anim.bvh"
+    bl_label = "Import BVH"
+
+    filename_ext = ".bvh"
+    filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
+
+    target = EnumProperty(items=(
+            ('ARMATURE', "Armature", ""),
+            ('OBJECT', "Object", ""),
+            ),
+                name="Target",
+                description="Import target type.",
+                default='ARMATURE')
+
+    global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
+    frame_start = IntProperty(name="Start Frame", description="Starting frame for the animation", default=1)
+    use_cyclic = BoolProperty(name="Loop", description="Loop the animation playback", default=False)
+    rotate_mode = EnumProperty(items=(
+            ('QUATERNION', "Quaternion", "Convert rotations to quaternions"),
+            ('NATIVE', "Euler (Native)", "Use the rotation order defined in the BVH file"),
+            ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
+            ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
+            ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
+            ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
+            ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
+            ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
+            ),
+                name="Rotation",
+                description="Rotation conversion.",
+                default='NATIVE')
+
+    def execute(self, context):
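+        # Deferred import: the module is only loaded when the operator runs,
+        # which keeps add-on registration fast and reload-friendly.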
+        from . import import_bvh
+        return import_bvh.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class ExportBVH(bpy.types.Operator, ExportHelper):
+    '''Save a BVH motion capture file from an armature'''
+    bl_idname = "export_anim.bvh"
+    bl_label = "Export BVH"
+
+    filename_ext = ".bvh"
+    filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
+
+    global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
+    frame_start = IntProperty(name="Start Frame", description="Starting frame to export", default=0)
+    frame_end = IntProperty(name="End Frame", description="End frame to export", default=0)
+
+    @classmethod
+    def poll(cls, context):
+        obj = context.object
+        return obj and obj.type == 'ARMATURE'
+
+    def invoke(self, context, event):
+        self.frame_start = context.scene.frame_start
+        self.frame_end = context.scene.frame_end
+
+        return super().invoke(context, event)
+
+    def execute(self, context):
+        if self.frame_start == 0 and self.frame_end == 0:
+            self.frame_start = context.scene.frame_start
+            self.frame_end = context.scene.frame_end
+
+        from . import export_bvh
+        return export_bvh.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+
+def menu_func_import(self, context):
+    self.layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)")
+
+
+def menu_func_export(self, context):
+    self.layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)")
+
+
+def register():
+    bpy.types.INFO_MT_file_import.append(menu_func_import)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_import.remove(menu_func_import)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+if __name__ == "__main__":
+    register()
diff --git a/io_anim_bvh/export_bvh.py b/io_anim_bvh/export_bvh.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc7b4207c7320a75a946043f3a9e787256de30d8
--- /dev/null
+++ b/io_anim_bvh/export_bvh.py
@@ -0,0 +1,248 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+# fixes from Andrea Rugliancich
+
+import bpy
+
+
+def write_armature(context, filepath, frame_start, frame_end, global_scale=1.0):
+
+    from mathutils import Matrix, Vector, Euler
+    from math import degrees
+
+    file = open(filepath, "w")
+
+    obj = context.object
+    arm = obj.data
+
+    # Build a dictionary of children.
+    # None for parentless
+    children = {None: []}
+
+    # initialize with blank lists
+    for bone in arm.bones:
+        children[bone.name] = []
+
+    for bone in arm.bones:
+        children[getattr(bone.parent, "name", None)].append(bone.name)
+
+    # sort the children
+    for children_list in children.values():
+        children_list.sort()
+
+    # bone name list in the order that the bones are written
+    serialized_names = []
+
+    node_locations = {}
+
+    file.write("HIERARCHY\n")
+
+    def write_recursive_nodes(bone_name, indent):
+        my_children = children[bone_name]
+
+        indent_str = "\t" * indent
+
+        bone = arm.bones[bone_name]
+        loc = bone.head_local
+        node_locations[bone_name] = loc
+
+        # make relative if we can
+        if bone.parent:
+            loc = loc - node_locations[bone.parent.name]
+
+        if indent:
+            file.write("%sJOINT %s\n" % (indent_str, bone_name))
+        else:
+            file.write("%sROOT %s\n" % (indent_str, bone_name))
+
+        file.write("%s{\n" % indent_str)
+        file.write("%s\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
+        if bone.use_connect and bone.parent:
+            file.write("%s\tCHANNELS 3 Xrotation Yrotation Zrotation\n" % indent_str)
+        else:
+            file.write("%s\tCHANNELS 6 Xposition Yposition Zposition Xrotation Yrotation Zrotation\n" % indent_str)
+
+        if my_children:
+            # store the location for the children
+            # to get their relative offset
+
+            # Write children
+            for child_bone in my_children:
+                serialized_names.append(child_bone)
+                write_recursive_nodes(child_bone, indent + 1)
+
+        else:
+            # Write the bone end.
+            file.write("%s\tEnd Site\n" % indent_str)
+            file.write("%s\t{\n" % indent_str)
+            loc = bone.tail_local - node_locations[bone_name]
+            file.write("%s\t\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
+            file.write("%s\t}\n" % indent_str)
+
+        file.write("%s}\n" % indent_str)
+
+    if len(children[None]) == 1:
+        key = children[None][0]
+        serialized_names.append(key)
+        indent = 0
+
+        write_recursive_nodes(key, indent)
+
+    else:
+        # Write a dummy parent node to hold the multiple roots
+        file.write("ROOT __root__\n")  # placeholder name; any unique joint name works
+        file.write("{\n")
+        file.write("\tOFFSET 0.0 0.0 0.0\n")
+        file.write("\tCHANNELS 0\n")  # Xposition Yposition Zposition Xrotation Yrotation Zrotation
+        key = None
+        indent = 1
+
+        write_recursive_nodes(key, indent)
+
+        file.write("}\n")
+
+    # redefine bones as sorted by serialized_names
+    # so we can write motion
+
+    class decorated_bone(object):
+        __slots__ = (
+        "name",  # bone name, used as key in many places
+        "parent",  # decorated bone parent, set in a later loop
+        "rest_bone",  # blender armature bone
+        "pose_bone",  # blender pose bone
+        "pose_mat",  # blender pose matrix
+        "rest_arm_mat",  # blender rest matrix (armature space)
+        "rest_local_mat",  # blender rest batrix (local space)
+        "pose_imat",  # pose_mat inverted
+        "rest_arm_imat",  # rest_arm_mat inverted
+        "rest_local_imat",  # rest_local_mat inverted
+        "prev_euler",  # last used euler to preserve euler compability in between keyframes
+        "connected",  # is the bone connected to the parent bone?
+        )
+
+        def __init__(self, bone_name):
+            self.name = bone_name
+            self.rest_bone = arm.bones[bone_name]
+            self.pose_bone = obj.pose.bones[bone_name]
+
+            self.pose_mat = self.pose_bone.matrix
+
+            self.rest_arm_mat = self.rest_bone.matrix_local
+            self.rest_local_mat = self.rest_bone.matrix
+
+            # inverted mats
+            self.pose_imat = self.pose_mat.copy().invert()
+            self.rest_arm_imat = self.rest_arm_mat.copy().invert()
+            self.rest_local_imat = self.rest_local_mat.copy().invert()
+
+            self.parent = None
+            self.prev_euler = Euler((0.0, 0.0, 0.0))
+            self.connected = (self.rest_bone.use_connect and self.rest_bone.parent)
+
+        def update_posedata(self):
+            self.pose_mat = self.pose_bone.matrix
+            self.pose_imat = self.pose_mat.copy().invert()
+
+        def __repr__(self):
+            if self.parent:
+                return "[\"%s\" child on \"%s\"]\n" % (self.name, self.parent.name)
+            else:
+                return "[\"%s\" root bone]\n" % (self.name)
+
+    bones_decorated = [decorated_bone(bone_name) for bone_name in serialized_names]
+
+    # Assign parents
+    bones_decorated_dict = {}
+    for dbone in bones_decorated:
+        bones_decorated_dict[dbone.name] = dbone
+
+    for dbone in bones_decorated:
+        parent = dbone.rest_bone.parent
+        if parent:
+            dbone.parent = bones_decorated_dict[parent.name]
+    del bones_decorated_dict
+    # finish assigning parents
+
+    scene = bpy.context.scene
+
+    file.write("MOTION\n")
+    file.write("Frames: %d\n" % (frame_end - frame_start + 1))
+    file.write("Frame Time: %.6f\n" % (1.0 / (scene.render.fps / scene.render.fps_base)))
+
+    for frame in range(frame_start, frame_end + 1):
+        scene.frame_set(frame)
+
+        for dbone in bones_decorated:
+            dbone.update_posedata()
+
+        for dbone in bones_decorated:
+            trans = Matrix.Translation(dbone.rest_bone.head_local)
+            itrans = Matrix.Translation(-dbone.rest_bone.head_local)
+
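+            # The products below remove the parent's pose and the bone's rest
+            # orientation; itrans/trans then recenter motion about the head.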
+            if dbone.parent:
+                mat_final = dbone.parent.rest_arm_mat * dbone.parent.pose_imat * dbone.pose_mat * dbone.rest_arm_imat
+                mat_final = itrans * mat_final * trans
+                loc = mat_final.translation_part() + (dbone.rest_bone.head_local - dbone.parent.rest_bone.head_local)
+            else:
+                mat_final = dbone.pose_mat * dbone.rest_arm_imat
+                mat_final = itrans * mat_final * trans
+                loc = mat_final.translation_part() + dbone.rest_bone.head
+
+            # keep eulers compatible, no jumping on interpolation.
+            rot = mat_final.rotation_part().invert().to_euler('XYZ', dbone.prev_euler)
+
+            if not dbone.connected:
+                file.write("%.6f %.6f %.6f " % (loc * global_scale)[:])
+
+            file.write("%.6f %.6f %.6f " % (-degrees(rot[0]), -degrees(rot[1]), -degrees(rot[2])))
+
+            dbone.prev_euler = rot
+
+        file.write("\n")
+
+    file.close()
+
+    print("BVH Exported: %s frames:%d\n" % (filepath, frame_end - frame_start + 1))
+
+
+def save(operator, context, filepath="",
+          frame_start=-1,
+          frame_end=-1,
+          global_scale=1.0,
+          ):
+
+    write_armature(context, filepath,
+           frame_start=frame_start,
+           frame_end=frame_end,
+           global_scale=global_scale,
+           )
+
+    return {'FINISHED'}
+
+
+if __name__ == "__main__":
+    scene = bpy.context.scene
+    write_armature(bpy.context, bpy.data.filepath.rstrip(".blend") + ".bvh", scene.frame_start, scene.frame_end, 1.0)
diff --git a/io_anim_bvh/import_bvh.py b/io_anim_bvh/import_bvh.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a0c0fcfd452dd4499f61dc4cfb6bc223924acb3
--- /dev/null
+++ b/io_anim_bvh/import_bvh.py
@@ -0,0 +1,556 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+
+import math
+from math import radians
+
+import bpy
+import mathutils
+from mathutils import Vector, Euler, Matrix
+
+
+class bvh_node_class(object):
+    __slots__ = (
+    'name',  # bvh joint name
+    'parent',  # bvh_node_class type or None for no parent
+    'children',  # a list of children of this type.
+    'rest_head_world',  # worldspace rest location for the head of this node
+    'rest_head_local',  # localspace rest location for the head of this node
+    'rest_tail_world',  # worldspace rest location for the tail of this node
+    'rest_tail_local',  # localspace rest location for the tail of this node
+    'channels',  # list of 6 ints, -1 for an unused channel, otherwise an index for the BVH motion data lines, loc triple then rot triple
+    'rot_order',  # a triple of indices giving the order rotation is applied. [0,1,2] is x/y/z - [None, None, None] if no rotation.
+    'rot_order_str',  # same as above but a string in 'XYZ' format.
+    'anim_data',  # a list of tuples, one for each frame: (locx, locy, locz, rotx, roty, rotz); euler rotations are ALWAYS stored in xyz order, even when 'NATIVE' is used.
+    'has_loc',  # convenience flag, bool, same as (channels[0] != -1 or channels[1] != -1 or channels[2] != -1)
+    'has_rot',  # convenience flag, bool, same as (channels[3] != -1 or channels[4] != -1 or channels[5] != -1)
+    'temp')  # use this for whatever you want
+
+    _eul_order_lookup = {
+        (0, 1, 2): 'XYZ',
+        (0, 2, 1): 'XZY',
+        (1, 0, 2): 'YXZ',
+        (1, 2, 0): 'YZX',
+        (2, 0, 1): 'ZXY',
+        (2, 1, 0): 'ZYX'}
+
+    def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order):
+        self.name = name
+        self.rest_head_world = rest_head_world
+        self.rest_head_local = rest_head_local
+        self.rest_tail_world = None
+        self.rest_tail_local = None
+        self.parent = parent
+        self.channels = channels
+        self.rot_order = tuple(rot_order)
+        self.rot_order_str = __class__._eul_order_lookup[self.rot_order]
+
+        # convenience functions
+        self.has_loc = channels[0] != -1 or channels[1] != -1 or channels[2] != -1
+        self.has_rot = channels[3] != -1 or channels[4] != -1 or channels[5] != -1
+
+        self.children = []
+
+        # list of 6 length tuples: (lx,ly,lz, rx,ry,rz)
+        # even if the channels aren't used they will just be zero
+        #
+        self.anim_data = [(0, 0, 0, 0, 0, 0)]
+
+    def __repr__(self):
+        return 'BVH name:"%s", rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)' %\
+        (self.name,\
+        self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z,\
+        self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z)
+
+
+def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0):
+    # File loading stuff
+    # Open the file for importing
+    file = open(file_path, 'rU')
+
+    # Separate into a list of lists, each line a list of words.
+    file_lines = file.readlines()
+    # Non-standard carriage returns?
+    if len(file_lines) == 1:
+        file_lines = file_lines[0].split('\r')
+
+    # Split by whitespace.
+    file_lines = [ll for ll in [l.split() for l in file_lines] if ll]
+
+    # Parse the hierarchy
+    if file_lines[0][0].lower() == 'hierarchy':
+        #print 'Importing the BVH Hierarchy for:', file_path
+        pass
+    else:
+        raise Exception("ERROR: This is not a BVH file")
+
+    bvh_nodes = {None: None}
+    bvh_nodes_serial = [None]
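+    # "None" stands in for the parent of root nodes; placeholders are also
+    # pushed after "End Site" blocks so that closing "}" lines pop uniformly.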
+
+    channelIndex = -1
+
+    lineIdx = 0  # An index for the file.
+    while lineIdx < len(file_lines) - 1:
+        #...
+        if file_lines[lineIdx][0].lower() == 'root' or file_lines[lineIdx][0].lower() == 'joint':
+
+            # Join multi-word joint names into one word with underscores.
+            if len(file_lines[lineIdx]) > 2:
+                file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:])
+                file_lines[lineIdx] = file_lines[lineIdx][:2]
+
+            # MAY NEED TO SUPPORT MULTIPLE ROOTS HERE! Still unsure whether multiple roots are possible??
+
+            # Make sure the names are unique- Object names will match joint names exactly and both will be unique.
+            name = file_lines[lineIdx][1]
+
+            #print '%snode: %s, parent: %s' % (len(bvh_nodes_serial) * '  ', name,  bvh_nodes_serial[-1])
+
+            lineIdx += 2  # Increment to the next line (Offset)
+            rest_head_local = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale
+            lineIdx += 1  # Increment to the next line (Channels)
+
+            # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation]
+            # newChannel references indices into the motion data;
+            # -1 marks an unused channel. Rotation channels are assumed
+            # to come as a full triple whenever any one of them is present.
+            my_channel = [-1, -1, -1, -1, -1, -1]
+            my_rot_order = [None, None, None]
+            rot_count = 0
+            for channel in file_lines[lineIdx][2:]:
+                channel = channel.lower()
+                channelIndex += 1  # So the index points to the right channel
+                if channel == 'xposition':
+                    my_channel[0] = channelIndex
+                elif channel == 'yposition':
+                    my_channel[1] = channelIndex
+                elif channel == 'zposition':
+                    my_channel[2] = channelIndex
+
+                elif channel == 'xrotation':
+                    my_channel[3] = channelIndex
+                    my_rot_order[rot_count] = 0
+                    rot_count += 1
+                elif channel == 'yrotation':
+                    my_channel[4] = channelIndex
+                    my_rot_order[rot_count] = 1
+                    rot_count += 1
+                elif channel == 'zrotation':
+                    my_channel[5] = channelIndex
+                    my_rot_order[rot_count] = 2
+                    rot_count += 1
+
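+            # my_rot_order now records the order rotation channels appeared in
+            # the file, e.g. Zrotation Xrotation Yrotation -> (2, 0, 1) -> 'ZXY'.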
+            channels = file_lines[lineIdx][2:]
+
+            my_parent = bvh_nodes_serial[-1]  # account for none
+
+            # Apply the parents offset accumulatively
+            if my_parent is None:
+                rest_head_world = Vector(rest_head_local)
+            else:
+                rest_head_world = my_parent.rest_head_world + rest_head_local
+
+            bvh_node = bvh_nodes[name] = bvh_node_class(name, rest_head_world, rest_head_local, my_parent, my_channel, my_rot_order)
+
+            # If we have another child then we can call ourselves a parent, else
+            bvh_nodes_serial.append(bvh_node)
+
+        # Account for an end node
+        if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site':  # There is sometimes a name after 'End Site' but we will ignore it.
+            lineIdx += 2  # Increment to the next line (Offset)
+            rest_tail = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale
+
+            bvh_nodes_serial[-1].rest_tail_world = bvh_nodes_serial[-1].rest_head_world + rest_tail
+            bvh_nodes_serial[-1].rest_tail_local = bvh_nodes_serial[-1].rest_head_local + rest_tail
+
+            # Just so we can remove the parents in a uniform way - the End Site
+            # placeholder is popped by its closing "}" like any other node
+            bvh_nodes_serial.append(None)
+
+        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}':  # == ['}']
+            bvh_nodes_serial.pop()  # Remove the last item
+
+        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion':
+            #print '\nImporting motion data'
+            lineIdx += 3  # Set the cursor to the first frame
+            break
+
+        lineIdx += 1
+
+    # Remove the None value used for easy parent reference
+    del bvh_nodes[None]
+    # Don't use this anymore
+    del bvh_nodes_serial
+
+    bvh_nodes_list = bvh_nodes.values()
+
+    while lineIdx < len(file_lines):
+        line = file_lines[lineIdx]
+        for bvh_node in bvh_nodes_list:
+            #for bvh_node in bvh_nodes_serial:
+            lx = ly = lz = rx = ry = rz = 0.0
+            channels = bvh_node.channels
+            anim_data = bvh_node.anim_data
+            if channels[0] != -1:
+                lx = global_scale * float(line[channels[0]])
+
+            if channels[1] != -1:
+                ly = global_scale * float(line[channels[1]])
+
+            if channels[2] != -1:
+                lz = global_scale * float(line[channels[2]])
+
+            if channels[3] != -1 or channels[4] != -1 or channels[5] != -1:
+
+                rx = radians(float(line[channels[3]]))
+                ry = radians(float(line[channels[4]]))
+                rz = radians(float(line[channels[5]]))
+
+            # Done importing motion data #
+            anim_data.append((lx, ly, lz, rx, ry, rz))
+        lineIdx += 1
+
+    # Assign children
+    for bvh_node in bvh_nodes.values():
+        bvh_node_parent = bvh_node.parent
+        if bvh_node_parent:
+            bvh_node_parent.children.append(bvh_node)
+
+    # Now set the tip of each bvh_node
+    for bvh_node in bvh_nodes.values():
+
+        if not bvh_node.rest_tail_world:
+            if len(bvh_node.children) == 0:
+                # could just fail here, but rare BVH files have childless nodes
+                bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world)
+                bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local)
+            elif len(bvh_node.children) == 1:
+                bvh_node.rest_tail_world = Vector(bvh_node.children[0].rest_head_world)
+                bvh_node.rest_tail_local = bvh_node.rest_head_local + bvh_node.children[0].rest_head_local
+            else:
+                # allow this, see above
+                #if not bvh_node.children:
+                #	raise 'error, bvh node has no end and no children. bad file'
+
+                # Removed temp for now
+                rest_tail_world = Vector((0.0, 0.0, 0.0))
+                rest_tail_local = Vector((0.0, 0.0, 0.0))
+                for bvh_node_child in bvh_node.children:
+                    rest_tail_world += bvh_node_child.rest_head_world
+                    rest_tail_local += bvh_node_child.rest_head_local
+
+                bvh_node.rest_tail_world = rest_tail_world * (1.0 / len(bvh_node.children))
+                bvh_node.rest_tail_local = rest_tail_local * (1.0 / len(bvh_node.children))
+
+        # Make sure tail isn't the same location as the head.
+        if (bvh_node.rest_tail_local - bvh_node.rest_head_local).length <= 0.001 * global_scale:
+            bvh_node.rest_tail_local.y = bvh_node.rest_tail_local.y + global_scale / 10
+            bvh_node.rest_tail_world.y = bvh_node.rest_tail_world.y + global_scale / 10
+
+    return bvh_nodes
+
+
+def bvh_node_dict2objects(context, bvh_name, bvh_nodes, rotate_mode='NATIVE', frame_start=1, IMPORT_LOOP=False):
+
+    if frame_start < 1:
+        frame_start = 1
+
+    scene = context.scene
+    for obj in scene.objects:
+        obj.select = False
+
+    objects = []
+
+    def add_ob(name):
+        obj = bpy.data.objects.new(name, None)
+        scene.objects.link(obj)
+        objects.append(obj)
+        obj.select = True
+
+        # nicer drawing.
+        obj.empty_draw_type = 'CUBE'
+        obj.empty_draw_size = 0.1
+
+        return obj
+
+    # Add objects
+    for name, bvh_node in bvh_nodes.items():
+        bvh_node.temp = add_ob(name)
+        bvh_node.temp.rotation_mode = bvh_node.rot_order_str[::-1]
+
+    # Parent the objects
+    for bvh_node in bvh_nodes.values():
+        for bvh_node_child in bvh_node.children:
+            bvh_node_child.temp.parent = bvh_node.temp
+
+    # Offset
+    for bvh_node in bvh_nodes.values():
+        # Make relative to parents offset
+        bvh_node.temp.location = bvh_node.rest_head_local
+
+    # Add tail objects
+    for name, bvh_node in bvh_nodes.items():
+        if not bvh_node.children:
+            ob_end = add_ob(name + '_end')
+            ob_end.parent = bvh_node.temp
+            ob_end.location = bvh_node.rest_tail_world - bvh_node.rest_head_world
+
+    for name, bvh_node in bvh_nodes.items():
+        obj = bvh_node.temp
+
+        for frame_current in range(len(bvh_node.anim_data)):
+
+            lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current]
+
+            if bvh_node.has_loc:
+                obj.delta_location = Vector((lx, ly, lz)) - bvh_node.rest_head_world
+                obj.keyframe_insert("delta_location", index=-1, frame=frame_start + frame_current)
+
+            if bvh_node.has_rot:
+                obj.delta_rotation_euler = rx, ry, rz
+                obj.keyframe_insert("delta_rotation_euler", index=-1, frame=frame_start + frame_current)
+
+    return objects
+
+
+def bvh_node_dict2armature(context, bvh_name, bvh_nodes, rotate_mode='XYZ', frame_start=1, IMPORT_LOOP=False):
+
+    if frame_start < 1:
+        frame_start = 1
+
+    # Add the new armature,
+    scene = context.scene
+    for obj in scene.objects:
+        obj.select = False
+
+    arm_data = bpy.data.armatures.new(bvh_name)
+    arm_ob = bpy.data.objects.new(bvh_name, arm_data)
+
+    scene.objects.link(arm_ob)
+
+    arm_ob.select = True
+    scene.objects.active = arm_ob
+
+    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+    bpy.ops.object.mode_set(mode='EDIT', toggle=False)
+
+    # Get the average bone length for zero length bones, we may not use this.
+    average_bone_length = 0.0
+    nonzero_count = 0
+    for bvh_node in bvh_nodes.values():
+        l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length
+        if l:
+            average_bone_length += l
+            nonzero_count += 1
+
+    # In very rare cases, all bones could be zero length???
+    if not average_bone_length:
+        average_bone_length = 0.1
+    else:
+        # Normal operation
+        average_bone_length = average_bone_length / nonzero_count
+
+    # XXX, annoying, remove bone.
+    while arm_data.edit_bones:
+        arm_data.edit_bones.remove(arm_data.edit_bones[-1])
+
+    ZERO_AREA_BONES = []
+    for name, bvh_node in bvh_nodes.items():
+        # New editbone
+        bone = bvh_node.temp = arm_data.edit_bones.new(name)
+
+        bone.head = bvh_node.rest_head_world
+        bone.tail = bvh_node.rest_tail_world
+
+        # ZERO AREA BONES.
+        if (bone.head - bone.tail).length < 0.001:
+            if bvh_node.parent:
+                ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local
+                if ofs.length:  # is our parent zero length also?? unlikely
+                    bone.tail = bone.tail + ofs
+                else:
+                    bone.tail.y = bone.tail.y + average_bone_length
+            else:
+                bone.tail.y = bone.tail.y + average_bone_length
+
+            ZERO_AREA_BONES.append(bone.name)
+
+    for bvh_node in bvh_nodes.values():
+        if bvh_node.parent:
+            # bvh_node.temp is the Editbone
+
+            # Set the bone parent
+            bvh_node.temp.parent = bvh_node.parent.temp
+
+            # Set the connection state
+            if not bvh_node.has_loc and\
+            bvh_node.parent and\
+            bvh_node.parent.temp.name not in ZERO_AREA_BONES and\
+            bvh_node.parent.rest_tail_local == bvh_node.rest_head_local:
+                bvh_node.temp.use_connect = True
+
+    # Replace the editbone with the editbone name,
+    # to avoid memory errors accessing the editbone outside editmode
+    for bvh_node in bvh_nodes.values():
+        bvh_node.temp = bvh_node.temp.name
+
+    # Now Apply the animation to the armature
+
+    # Get armature animation data
+    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+
+    pose = arm_ob.pose
+    pose_bones = pose.bones
+
+    if rotate_mode == 'NATIVE':
+        for bvh_node in bvh_nodes.values():
+            bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
+            pose_bone = pose_bones[bone_name]
+            pose_bone.rotation_mode = bvh_node.rot_order_str
+
+    elif rotate_mode != 'QUATERNION':
+        for pose_bone in pose_bones:
+            pose_bone.rotation_mode = rotate_mode
+    else:
+        # Quats default
+        pass
+
+    context.scene.update()
+
+    arm_ob.animation_data_create()
+    action = bpy.data.actions.new(name=bvh_name)
+    arm_ob.animation_data.action = action
+
+    # Replace the bvh_node.temp (currently an editbone)
+    # With a tuple  (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv)
+    for bvh_node in bvh_nodes.values():
+        bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
+        pose_bone = pose_bones[bone_name]
+        rest_bone = arm_data.bones[bone_name]
+        bone_rest_matrix = rest_bone.matrix_local.rotation_part()
+
+        bone_rest_matrix_inv = Matrix(bone_rest_matrix)
+        bone_rest_matrix_inv.invert()
+
+        bone_rest_matrix_inv.resize4x4()
+        bone_rest_matrix.resize4x4()
+        bvh_node.temp = (pose_bone, rest_bone, bone_rest_matrix, bone_rest_matrix_inv)
+
+    # Make a dict for fast access without rebuilding a list all the time.
+
+    # KEYFRAME METHOD, SLOW, USE IPOS DIRECT
+    # TODO: use f-point samples instead (Aligorith)
+    if rotate_mode != 'QUATERNION':
+        prev_euler = [Euler() for i in range(len(bvh_nodes))]
+
+    # Animate the data, the last used bvh_node will do since they all have the same number of frames
+    for frame_current in range(len(bvh_node.anim_data) - 1):  # skip the first frame (rest frame)
+        # print frame_current
+
+        # if frame_current==40: # debugging
+        # 	break
+
+        scene.frame_set(frame_start + frame_current)
+
+        # keyframe_insert() below uses the current frame set above
+        for i, bvh_node in enumerate(bvh_nodes.values()):
+            pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv = bvh_node.temp
+            lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current + 1]
+
+            if bvh_node.has_rot:
+                # apply rotation order and convert to XYZ
+                # note that the rot_order_str is reversed.
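+                # bone_rest_matrix_inv * R * bone_rest_matrix is a change of basis
+                # that re-expresses the BVH rotation in the bone's local space.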
+                bone_rotation_matrix = Euler((rx, ry, rz), bvh_node.rot_order_str[::-1]).to_matrix().resize4x4()
+                bone_rotation_matrix = bone_rest_matrix_inv * bone_rotation_matrix * bone_rest_matrix
+
+                if rotate_mode == 'QUATERNION':
+                    pose_bone.rotation_quaternion = bone_rotation_matrix.to_quat()
+                else:
+                    euler = bone_rotation_matrix.to_euler(bvh_node.rot_order_str, prev_euler[i])
+                    pose_bone.rotation_euler = euler
+                    prev_euler[i] = euler
+
+            if bvh_node.has_loc:
+                pose_bone.location = (bone_rest_matrix_inv * Matrix.Translation(Vector((lx, ly, lz)) - bvh_node.rest_head_local)).translation_part()
+
+            if bvh_node.has_loc:
+                pose_bone.keyframe_insert("location")
+            if bvh_node.has_rot:
+                if rotate_mode == 'QUATERNION':
+                    pose_bone.keyframe_insert("rotation_quaternion")
+                else:
+                    pose_bone.keyframe_insert("rotation_euler")
+
+    for cu in action.fcurves:
+        if IMPORT_LOOP:
+            pass  # 2.5 doesn't have cyclic now?
+
+        for bez in cu.keyframe_points:
+            bez.interpolation = 'LINEAR'
+
+    return arm_ob
+
+
+def load(operator, context, filepath="", target='ARMATURE', rotate_mode='NATIVE', global_scale=1.0, use_cyclic=False, frame_start=1):
+    import time
+    t1 = time.time()
+    print('\tparsing bvh %r...' % filepath, end="")
+
+    bvh_nodes = read_bvh(context, filepath,
+            rotate_mode=rotate_mode,
+            global_scale=global_scale)
+
+    print('%.4f' % (time.time() - t1))
+
+    frame_orig = context.scene.frame_current
+
+    t1 = time.time()
+    print('\timporting to blender...', end="")
+
+    bvh_name = bpy.path.display_name_from_filepath(filepath)
+
+    if target == 'ARMATURE':
+        bvh_node_dict2armature(context, bvh_name, bvh_nodes,
+                rotate_mode=rotate_mode,
+                frame_start=frame_start,
+                IMPORT_LOOP=use_cyclic)
+
+    elif target == 'OBJECT':
+        bvh_node_dict2objects(context, bvh_name, bvh_nodes,
+                rotate_mode=rotate_mode,
+                frame_start=frame_start,
+                IMPORT_LOOP=use_cyclic)
+
+    else:
+        raise Exception("invalid type")
+
+    print('Done in %.4f\n' % (time.time() - t1))
+
+    context.scene.frame_set(frame_orig)
+
+    return {'FINISHED'}
diff --git a/io_mesh_ply/__init__.py b/io_mesh_ply/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdac6fa2bab3789562f2d0c3e432fcbdd70f0ffb
--- /dev/null
+++ b/io_mesh_ply/__init__.py
@@ -0,0 +1,111 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "Stanford PLY format",
+    "author": "Bruce Merry, Campbell Barton",
+    "location": "File > Import-Export",
+    "description": "Import-Export PLY mesh data withs UV's and vertex colors",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/Stanford_PLY",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "export_ply" in locals():
+        imp.reload(export_ply)
+    if "import_ply" in locals():
+        imp.reload(import_ply)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ImportHelper, ExportHelper
+
+
+class ImportPLY(bpy.types.Operator, ImportHelper):
+    '''Load a PLY geometry file'''
+    bl_idname = "import_mesh.ply"
+    bl_label = "Import PLY"
+
+    filename_ext = ".ply"
+    filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})
+
+    def execute(self, context):
+        from . import import_ply
+        return import_ply.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class ExportPLY(bpy.types.Operator, ExportHelper):
+    '''Export a single object as a Stanford PLY with normals, colours and texture coordinates.'''
+    bl_idname = "export_mesh.ply"
+    bl_label = "Export PLY"
+
+    filename_ext = ".ply"
+    filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})
+
+    use_modifiers = BoolProperty(name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default=True)
+    use_normals = BoolProperty(name="Normals", description="Export Normals for smooth and hard shaded faces", default=True)
+    use_uv_coords = BoolProperty(name="UVs", description="Exort the active UV layer", default=True)
+    use_colors = BoolProperty(name="Vertex Colors", description="Exort the active vertex color layer", default=True)
+
+    @classmethod
+    def poll(cls, context):
+        return context.active_object is not None
+
+    def execute(self, context):
+        self.filepath = bpy.path.ensure_ext(self.filepath, self.filename_ext)
+        from . import export_ply
+        return export_ply.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+    def draw(self, context):
+        layout = self.layout
+
+        row = layout.row()
+        row.prop(self, "use_modifiers")
+        row.prop(self, "use_normals")
+        row = layout.row()
+        row.prop(self, "use_uv_coords")
+        row.prop(self, "use_colors")
+
+
+def menu_func_import(self, context):
+    self.layout.operator(ImportPLY.bl_idname, text="Stanford (.ply)")
+
+
+def menu_func_export(self, context):
+    self.layout.operator(ExportPLY.bl_idname, text="Stanford (.ply)")
+
+
+def register():
+    bpy.types.INFO_MT_file_import.append(menu_func_import)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_import.remove(menu_func_import)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+if __name__ == "__main__":
+    register()
diff --git a/io_mesh_ply/export_ply.py b/io_mesh_ply/export_ply.py
new file mode 100644
index 0000000000000000000000000000000000000000..271a2d23207a063a3a5cce9e3494562d6dac7296
--- /dev/null
+++ b/io_mesh_ply/export_ply.py
@@ -0,0 +1,205 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za
+# Contributors: Bruce Merry, Campbell Barton
+
+"""
+This script exports Stanford PLY files from Blender. It supports normals,
+colours, and texture coordinates per face or per vertex.
+Only one mesh can be exported at a time.
+"""
+
+import bpy
+import os
+
+
+def save(operator, context, filepath="", use_modifiers=True, use_normals=True, use_uv_coords=True, use_colors=True):
+
+    def rvec3d(v):
+        return round(v[0], 6), round(v[1], 6), round(v[2], 6)
+
+    def rvec2d(v):
+        return round(v[0], 6), round(v[1], 6)
+
+    scene = context.scene
+    obj = context.object
+
+    if not obj:
+        raise Exception("Error, Select 1 active object")
+
+    file = open(filepath, 'w')
+
+    if scene.objects.active:
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+    if use_modifiers:
+        mesh = obj.create_mesh(scene, True, 'PREVIEW')
+    else:
+        mesh = obj.data
+
+    if not mesh:
+        raise Exception("Error, could not get mesh data from active object")
+
+    # mesh.transform(obj.matrix_world) # XXX
+
+    faceUV = (len(mesh.uv_textures) > 0)
+    vertexUV = (len(mesh.sticky) > 0)
+    vertexColors = len(mesh.vertex_colors) > 0
+
+    if (not faceUV) and (not vertexUV):
+        use_uv_coords = False
+    if not vertexColors:
+        use_colors = False
+
+    if not use_uv_coords:
+        faceUV = vertexUV = False
+    if not use_colors:
+        vertexColors = False
+
+    if faceUV:
+        active_uv_layer = mesh.uv_textures.active
+        if not active_uv_layer:
+            use_uv_coords = False
+            faceUV = None
+        else:
+            active_uv_layer = active_uv_layer.data
+
+    if vertexColors:
+        active_col_layer = mesh.vertex_colors.active
+        if not active_col_layer:
+            use_colors = False
+            vertexColors = None
+        else:
+            active_col_layer = active_col_layer.data
+
+    # in case they are not written below
+    color = uvcoord = uvcoord_key = normal = normal_key = None
+
+    mesh_verts = mesh.vertices  # save a lookup
+    ply_verts = []  # list of dictionaries
+    # vdict = {} # (index, normal, uv) -> new index
+    vdict = [{} for i in range(len(mesh_verts))]
+    ply_faces = [[] for f in range(len(mesh.faces))]
+    vert_count = 0
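+    # Vertices are "split": a mesh vertex used with different normal, UV, or
+    # color values becomes several PLY vertices, deduplicated via vdict keys.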
+    for i, f in enumerate(mesh.faces):
+
+        smooth = f.use_smooth
+        if not smooth:
+            normal = tuple(f.normal)
+            normal_key = rvec3d(normal)
+
+        if faceUV:
+            uv = active_uv_layer[i]
+            uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4  # XXX - crufty :/
+        if vertexColors:
+            col = active_col_layer[i]
+            col = col.color1[:], col.color2[:], col.color3[:], col.color4[:]
+
+        f_verts = f.vertices
+
+        pf = ply_faces[i]
+        for j, vidx in enumerate(f_verts):
+            v = mesh_verts[vidx]
+
+            if smooth:
+                normal = tuple(v.normal)
+                normal_key = rvec3d(normal)
+
+            if faceUV:
+                uvcoord = uv[j][0], 1.0 - uv[j][1]
+                uvcoord_key = rvec2d(uvcoord)
+            elif vertexUV:
+                uvcoord = v.uvco[0], 1.0 - v.uvco[1]
+                uvcoord_key = rvec2d(uvcoord)
+
+            if vertexColors:
+                color = col[j]
+                color = int(color[0] * 255.0), int(color[1] * 255.0), int(color[2] * 255.0)
+
+            key = normal_key, uvcoord_key, color
+
+            vdict_local = vdict[vidx]
+            pf_vidx = vdict_local.get(key)  # Will be None initially
+
+            if pf_vidx is None:  # same as "key not in vdict_local"
+                pf_vidx = vdict_local[key] = vert_count
+                ply_verts.append((vidx, normal, uvcoord, color))
+                vert_count += 1
+
+            pf.append(pf_vidx)
+
+    file.write('ply\n')
+    file.write('format ascii 1.0\n')
+    file.write('comment Created by Blender %s - www.blender.org, source file: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
+
+    file.write('element vertex %d\n' % len(ply_verts))
+
+    file.write('property float x\n')
+    file.write('property float y\n')
+    file.write('property float z\n')
+
+    if use_normals:
+        file.write('property float nx\n')
+        file.write('property float ny\n')
+        file.write('property float nz\n')
+    if use_uv_coords:
+        file.write('property float s\n')
+        file.write('property float t\n')
+    if use_colors:
+        file.write('property uchar red\n')
+        file.write('property uchar green\n')
+        file.write('property uchar blue\n')
+
+    file.write('element face %d\n' % len(mesh.faces))
+    file.write('property list uchar uint vertex_indices\n')
+    file.write('end_header\n')
+
+    for i, v in enumerate(ply_verts):
+        file.write('%.6f %.6f %.6f ' % mesh_verts[v[0]].co[:])  # co
+        if use_normals:
+            file.write('%.6f %.6f %.6f ' % v[1])  # no
+        if use_uv_coords:
+            file.write('%.6f %.6f ' % v[2])  # uv
+        if use_colors:
+            file.write('%u %u %u' % v[3])  # col
+        file.write('\n')
+
+    for pf in ply_faces:
+        if len(pf) == 3:
+            file.write('3 %d %d %d\n' % tuple(pf))
+        else:
+            file.write('4 %d %d %d %d\n' % tuple(pf))
+
+    file.close()
+    print("writing %r done" % filepath)
+
+    if use_modifiers:
+        bpy.data.meshes.remove(mesh)
+
+    # XXX
+    """
+    if is_editmode:
+        Blender.Window.EditMode(1, '', 0)
+    """
+
+    return {'FINISHED'}
diff --git a/io_mesh_ply/import_ply.py b/io_mesh_ply/import_ply.py
new file mode 100644
index 0000000000000000000000000000000000000000..6de90b1e86ef2c0c673933a21d0e88d0b1337540
--- /dev/null
+++ b/io_mesh_ply/import_ply.py
@@ -0,0 +1,343 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import re
+import struct
+
+
+class element_spec(object):
+    __slots__ = ("name",
+                 "count",
+                 "properties",
+                 )
+
+    def __init__(self, name, count):
+        self.name = name
+        self.count = count
+        self.properties = []
+
+    def load(self, format, stream):
+        if format == 'ascii':
+            stream = re.split(r'\s+', stream.readline())
+        return [x.load(format, stream) for x in self.properties]
+
+    def index(self, name):
+        for i, p in enumerate(self.properties):
+            if p.name == name:
+                return i
+        return -1
+
+
+class property_spec(object):
+    __slots__ = ("name",
+                 "list_type",
+                 "numeric_type",
+                 )
+
+    def __init__(self, name, list_type, numeric_type):
+        self.name = name
+        self.list_type = list_type
+        self.numeric_type = numeric_type
+
+    def read_format(self, format, count, num_type, stream):
+        if format == 'ascii':
+            if num_type == 's':
+                ans = []
+                for i in range(count):
+                    s = stream[i]
+                    if len(s) < 2 or s[0] != '"' or s[-1] != '"':
+                        print('Invalid string', s)
+                        print('Note: ply_import.py does not handle whitespace in strings')
+                        return None
+                    ans.append(s[1:-1])
+                stream[:count] = []
+                return ans
+            if num_type == 'f' or num_type == 'd':
+                mapper = float
+            else:
+                mapper = int
+            ans = [mapper(x) for x in stream[:count]]
+            stream[:count] = []
+            return ans
+        else:
+            if num_type == 's':
+                ans = []
+                for i in range(count):
+                    fmt = format + 'i'
+                    data = stream.read(struct.calcsize(fmt))
+                    length = struct.unpack(fmt, data)[0]
+                    fmt = '%s%is' % (format, length)
+                    data = stream.read(struct.calcsize(fmt))
+                    s = struct.unpack(fmt, data)[0]
+                    ans.append(s[:-1])  # strip the NULL
+                return ans
+            else:
+                fmt = '%s%i%s' % (format, count, num_type)
+                data = stream.read(struct.calcsize(fmt))
+                return struct.unpack(fmt, data)
+
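+    # A list property stores a count (read as list_type) followed by that
+    # many values (numeric_type); scalar properties are a single value.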
+    def load(self, format, stream):
+        if self.list_type is not None:
+            count = int(self.read_format(format, 1, self.list_type, stream)[0])
+            return self.read_format(format, count, self.numeric_type, stream)
+        else:
+            return self.read_format(format, 1, self.numeric_type, stream)[0]
+
+
+class object_spec(object):
+    'A list of element_specs'
+    __slots__ = ("specs",
+                 )
+    def __init__(self):
+        self.specs = []
+
+    def load(self, format, stream):
+        return dict([(i.name, [i.load(format, stream) for j in range(i.count)]) for i in self.specs])
+
+        '''
+        # Longhand for above LC
+        answer = {}
+        for i in self.specs:
+            answer[i.name] = []
+            for j in range(i.count):
+                if not j % 100 and meshtools.show_progress:
+                    Blender.Window.DrawProgressBar(float(j) / i.count, 'Loading ' + i.name)
+                answer[i.name].append(i.load(format, stream))
+        return answer
+            '''
+
+
+def read(filepath):
+    format = ''
+    version = '1.0'
+    format_specs = {'binary_little_endian': '<',
+            'binary_big_endian': '>',
+            'ascii': 'ascii'}
+    type_specs = {'char': 'b',
+              'uchar': 'B',
+              'int8': 'b',
+              'uint8': 'B',
+              'int16': 'h',
+              'uint16': 'H',
+              'ushort': 'H',
+              'int': 'i',
+              'int32': 'i',
+              'uint': 'I',
+              'uint32': 'I',
+              'float': 'f',
+              'float32': 'f',
+              'float64': 'd',
+              'double': 'd',
+              'string': 's'}
+    obj_spec = object_spec()
+
+    try:
+        file = open(filepath, 'rU')  # Only for parsing the header, not binary data
+        signature = file.readline()
+
+        if not signature.startswith('ply'):
+            print('Signature line was invalid')
+            return None
+
+        while 1:
+            tokens = re.split(r'[ \n]+', file.readline())
+
+            if len(tokens) == 0:
+                continue
+            if tokens[0] == 'end_header':
+                break
+            elif tokens[0] == 'comment' or tokens[0] == 'obj_info':
+                continue
+            elif tokens[0] == 'format':
+                if len(tokens) < 3:
+                    print('Invalid format line')
+                    return None
+                if tokens[1] not in format_specs:  # .keys(): # keys is implicit
+                    print('Unknown format', tokens[1])
+                    return None
+                if tokens[2] != version:
+                    print('Unknown version', tokens[2])
+                    return None
+                format = tokens[1]
+            elif tokens[0] == 'element':
+                if len(tokens) < 3:
+                    print('Invalid element line')
+                    return None
+                obj_spec.specs.append(element_spec(tokens[1], int(tokens[2])))
+            elif tokens[0] == 'property':
+                if not len(obj_spec.specs):
+                    print('Property without element')
+                    return None
+                if tokens[1] == 'list':
+                    obj_spec.specs[-1].properties.append(property_spec(tokens[4], type_specs[tokens[2]], type_specs[tokens[3]]))
+                else:
+                    obj_spec.specs[-1].properties.append(property_spec(tokens[2], None, type_specs[tokens[1]]))
+
+        if format != 'ascii':
+            file.close()  # was ascii, now binary
+            file = open(filepath, 'rb')
+
+            # skip the header...
+            while not file.readline().startswith(b'end_header'):
+                pass
+
+        obj = obj_spec.load(format_specs[format], file)
+
+    except IOError:
+        try:
+            file.close()
+        except:
+            pass
+
+        return None
+    try:
+        file.close()
+    except:
+        pass
+
+    return obj_spec, obj
+
+
+import bpy
+
+
+def load_ply(filepath):
+    import time
+    from io_utils import unpack_face_list
+
+    t = time.time()
+    obj_spec, obj = read(filepath)
+    if obj is None:
+        print('Invalid file')
+        return
+
+    uvindices = colindices = None
+    # noindices = None # Ignore normals
+
+    for el in obj_spec.specs:
+        if el.name == 'vertex':
+            vindices = vindices_x, vindices_y, vindices_z = (el.index('x'), el.index('y'), el.index('z'))
+            # noindices = (el.index('nx'), el.index('ny'), el.index('nz'))
+            # if -1 in noindices: noindices = None
+            uvindices = (el.index('s'), el.index('t'))
+            if -1 in uvindices:
+                uvindices = None
+            colindices = (el.index('red'), el.index('green'), el.index('blue'))
+            if -1 in colindices:
+                colindices = None
+        elif el.name == 'face':
+            findex = el.index('vertex_indices')
+
+    mesh_faces = []
+    mesh_uvs = []
+    mesh_colors = []
+
+    def add_face(vertices, indices, uvindices, colindices):
+        mesh_faces.append(indices)
+        if uvindices:
+            mesh_uvs.append([(vertices[index][uvindices[0]], 1.0 - vertices[index][uvindices[1]]) for index in indices])
+        if colindices:
+            mesh_colors.append([(vertices[index][colindices[0]], vertices[index][colindices[1]], vertices[index][colindices[2]]) for index in indices])
+
+    if uvindices or colindices:
+        # If we have Cols or UVs then we need to check the face order.
+        add_face_simple = add_face
+
+        # EVIL EEKADOODLE - face order annoyance.
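+        # Blender ignores a zero index in the last slot of a face, so rotate
+        # the indices (preserving winding) to keep vertex 0 out of that slot.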
+        def add_face(vertices, indices, uvindices, colindices):
+            if len(indices) == 4:
+                if indices[2] == 0 or indices[3] == 0:
+                    indices = indices[2], indices[3], indices[0], indices[1]
+            elif len(indices) == 3:
+                if indices[2] == 0:
+                    indices = indices[1], indices[2], indices[0]
+
+            add_face_simple(vertices, indices, uvindices, colindices)
+
+    verts = obj['vertex']
+
+    if 'face' in obj:
+        for f in obj['face']:
+            ind = f[findex]
+            len_ind = len(ind)
+            if len_ind <= 4:
+                add_face(verts, ind, uvindices, colindices)
+            else:
+                # Fan fill the face
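+                # e.g. indices (v0, v1, v2, v3, v4) become the triangles
+                # (v0, v1, v2), (v0, v2, v3), (v0, v3, v4)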
+                for j in range(len_ind - 2):
+                    add_face(verts, (ind[0], ind[j + 1], ind[j + 2]), uvindices, colindices)
+
+    ply_name = bpy.path.display_name_from_filepath(filepath)
+
+    mesh = bpy.data.meshes.new(name=ply_name)
+
+    mesh.vertices.add(len(obj['vertex']))
+
+    mesh.vertices.foreach_set("co", [a for v in obj['vertex'] for a in (v[vindices_x], v[vindices_y], v[vindices_z])])
+
+    if mesh_faces:
+        mesh.faces.add(len(mesh_faces))
+        mesh.faces.foreach_set("vertices_raw", unpack_face_list(mesh_faces))
+
+        if uvindices or colindices:
+            if uvindices:
+                uvlay = mesh.uv_textures.new()
+            if colindices:
+                vcol_lay = mesh.vertex_colors.new()
+
+            if uvindices:
+                for i, f in enumerate(uvlay.data):
+                    ply_uv = mesh_uvs[i]
+                    for j, uv in enumerate(f.uv):
+                        uv[:] = ply_uv[j]
+
+            if colindices:
+                faces = obj['face']
+                for i, f in enumerate(vcol_lay.data):
+                    # XXX, colors don't come in right, needs further investigation.
+                    ply_col = mesh_colors[i]
+                    if len(faces[i]) == 4:
+                        f_col = f.color1, f.color2, f.color3, f.color4
+                    else:
+                        f_col = f.color1, f.color2, f.color3
+
+                    for j, col in enumerate(f_col):
+                        col.r, col.g, col.b = ply_col[j]
+
+    mesh.update()
+
+    scn = bpy.context.scene
+    #scn.objects.selected = [] # XXX25
+
+    obj = bpy.data.objects.new(ply_name, mesh)
+    scn.objects.link(obj)
+    scn.objects.active = obj
+    obj.select = True
+
+    print('\nSuccessfully imported %r in %.3f sec' % (filepath, time.time() - t))
+
+
+def load(operator, context, filepath=""):
+    load_ply(filepath)
+    return {'FINISHED'}
diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e701abeb6ba889e37d21c3a764958d3dbe0730e
--- /dev/null
+++ b/io_scene_3ds/__init__.py
@@ -0,0 +1,100 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "Autodesk 3DS format",
+    "author": "Bob Holcomb, Campbell Barton",
+    "location": "File > Import-Export",
+    "description": "Import-Export 3DS, meshes, uvs, materials, textures, cameras & lamps",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/Autodesk_3DS",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "import_3ds" in locals():
+        imp.reload(import_3ds)
+    if "export_3ds" in locals():
+        imp.reload(export_3ds)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ImportHelper, ExportHelper
+
+
+class Import3DS(bpy.types.Operator, ImportHelper):
+    '''Import from 3DS file format (.3ds)'''
+    bl_idname = "import_scene.autodesk_3ds"
+    bl_label = 'Import 3DS'
+
+    filename_ext = ".3ds"
+    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})
+
+    constrain_size = FloatProperty(name="Size Constraint", description="Scale the model by 10 until it reaches the size constraint. Zero disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0)
+    use_image_search = BoolProperty(name="Image Search", description="Search subdirectories for any associated images (Warning, may be slow)", default=True)
+    use_apply_transform = BoolProperty(name="Apply Transform", description="Workaround for object transformations importing incorrectly", default=True)
+
+    def execute(self, context):
+        from . import import_3ds
+        return import_3ds.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class Export3DS(bpy.types.Operator, ExportHelper):
+    '''Export to 3DS file format (.3ds)'''
+    bl_idname = "export_scene.autodesk_3ds"
+    bl_label = 'Export 3DS'
+
+    filename_ext = ".3ds"
+    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})
+
+    def execute(self, context):
+        from . import export_3ds
+        return export_3ds.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+
+# Add to a menu
+def menu_func_export(self, context):
+    self.layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
+
+
+def menu_func_import(self, context):
+    self.layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
+
+
+def register():
+    bpy.types.INFO_MT_file_import.append(menu_func_import)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_import.remove(menu_func_import)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+# NOTES:
+# why add 1 extra vertex? and remove it when done? - "Answer - eekadoodle - would need to re-order UV's without this since face order isnt always what we give blender, BMesh will solve :D"
+# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)
+
+if __name__ == "__main__":
+    register()
diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ed8fbce85eaadfcdbc61943ee470b45db95e8c
--- /dev/null
+++ b/io_scene_3ds/export_3ds.py
@@ -0,0 +1,1044 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman
+
+"""
+Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information
+from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode.
+"""
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will export
+#----- Primary Chunk, at the beginning of each file
+PRIMARY= 0x4D4D
+
+#------ Main Chunks
+OBJECTINFO   =      0x3D3D      #This gives the version of the mesh and is found right before the material and object information
+VERSION      =      0x0002      #This gives the version of the .3ds file
+KFDATA       =      0xB000      #This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL=45055		#0xAFFF				// This stores the texture info
+OBJECT=16384		#0x4000				// This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+MATNAME    =      0xA000      # This holds the material name
+MATAMBIENT   =      0xA010      # Ambient color of the object/material
+MATDIFFUSE   =      0xA020      # This holds the color of the object/material
+MATSPECULAR   =      0xA030      # Specular color of the object/material
+MATSHINESS   =      0xA040      # ??
+MATMAP       =      0xA200      # This is a header for a new texture map
+MATMAPFILE    =      0xA300      # This holds the file name of the texture
+
+RGB1=	0x0011
+RGB2=	0x0012
+
+#>------ sub defines of OBJECT
+OBJECT_MESH  =      0x4100      # This lets us know that we are reading a new object
+OBJECT_LIGHT =      0x4600      # This lets us know we are reading a light object
+OBJECT_CAMERA=      0x4700      # This lets us know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES=   0x4720      # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES =   0x4110      # The objects vertices
+OBJECT_FACES    =   0x4120      # The objects faces
+OBJECT_MATERIAL =   0x4130      # This is found if the object has a material, either texture map or color
+OBJECT_UV       =   0x4140      # The UV texture coordinates
+OBJECT_TRANS_MATRIX  =   0x4160 # The Object Matrix
+
+#>------ sub defines of KFDATA
+KFDATA_KFHDR            = 0xB00A
+KFDATA_KFSEG            = 0xB008
+KFDATA_KFCURTIME        = 0xB009
+KFDATA_OBJECT_NODE_TAG  = 0xB002
+
+#>------ sub defines of OBJECT_NODE_TAG
+OBJECT_NODE_ID          = 0xB030
+OBJECT_NODE_HDR         = 0xB010
+OBJECT_PIVOT            = 0xB013
+OBJECT_INSTANCE_NAME    = 0xB011
+POS_TRACK_TAG			= 0xB020
+ROT_TRACK_TAG			= 0xB021
+SCL_TRACK_TAG			= 0xB022
+
+import struct
+
+# So 3ds max can open files, limit names to 12 characters in length;
+# this is very annoying for filenames!
+name_unique = []
+name_mapping = {}
+def sane_name(name):
+    name_fixed = name_mapping.get(name)
+    if name_fixed is not None:
+        return name_fixed
+
+    new_name = name[:12]
+
+    i = 0
+
+    while new_name in name_unique:
+        new_name = new_name[:-4] + '.%.3d' % i
+        i+=1
+
+    name_unique.append(new_name)
+    name_mapping[name] = new_name
+    return new_name
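+
+# Example of the scheme above (hypothetical names): sane_name("MyLongMaterialName")
+# -> "MyLongMateri"; a second, different name sharing that 12-character prefix
+# -> "MyLongMa.000", then "MyLongMa.001", and so on.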
+
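+# Rounding to 6 decimals makes uv_key() stable as a dict key, so remove_face_uv()
+# can merge uvs that differ only by float noise.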
+def uv_key(uv):
+    return round(uv[0], 6), round(uv[1], 6)
+
+# size defines:
+SZ_SHORT = 2
+SZ_INT   = 4
+SZ_FLOAT = 4
+
+class _3ds_short(object):
+    '''Class representing a short (2-byte integer) for a 3ds file.
+    Note: packed with '<H', so this is written as an *unsigned* short.'''
+    __slots__ = ('value', )
+    def __init__(self, val=0):
+        self.value = val
+
+    def get_size(self):
+        return SZ_SHORT
+
+    def write(self,file):
+        file.write(struct.pack("<H", self.value))
+
+    def __str__(self):
+        return str(self.value)
+
+class _3ds_int(object):
+    '''Class representing an int (4-byte integer) for a 3ds file.'''
+    __slots__ = ('value', )
+    def __init__(self, val):
+        self.value = val
+
+    def get_size(self):
+        return SZ_INT
+
+    def write(self,file):
+        file.write(struct.pack("<I", self.value))
+
+    def __str__(self):
+        return str(self.value)
+
+class _3ds_float(object):
+    '''Class representing a 4-byte IEEE floating point number for a 3ds file.'''
+    __slots__ = ('value', )
+    def __init__(self, val):
+        self.value=val
+
+    def get_size(self):
+        return SZ_FLOAT
+
+    def write(self,file):
+        file.write(struct.pack("<f", self.value))
+
+    def __str__(self):
+        return str(self.value)
+
+
+class _3ds_string(object):
+    '''Class representing a zero-terminated string for a 3ds file.'''
+    __slots__ = ('value', )
+    def __init__(self, val):
+        self.value=val
+
+    def get_size(self):
+        return (len(self.value)+1)
+
+    def write(self,file):
+        binary_format = "<%ds" % (len(self.value)+1)
+        # struct's 's' format needs bytes in Python 3, so encode str values
+        value = self.value.encode('ascii', 'replace') if isinstance(self.value, str) else self.value
+        file.write(struct.pack(binary_format, value))
+
+    def __str__(self):
+        return self.value
+
+class _3ds_point_3d(object):
+    '''Class representing a three-dimensional point for a 3ds file.'''
+    __slots__ = 'x','y','z'
+    def __init__(self, point):
+        self.x, self.y, self.z = point
+
+    def get_size(self):
+        return 3*SZ_FLOAT
+
+    def write(self,file):
+        file.write(struct.pack('<3f', self.x, self.y, self.z))
+
+    def __str__(self):
+        return '(%f, %f, %f)' % (self.x, self.y, self.z)
+
+# Used for writing a track
+"""
+class _3ds_point_4d(object):
+    '''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.'''
+    __slots__ = 'x','y','z','w'
+    def __init__(self, point=(0.0,0.0,0.0,0.0)):
+        self.x, self.y, self.z, self.w = point
+
+    def get_size(self):
+        return 4*SZ_FLOAT
+
+    def write(self,file):
+        data=struct.pack('<4f', self.x, self.y, self.z, self.w)
+        file.write(data)
+
+    def __str__(self):
+        return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
+"""
+
+class _3ds_point_uv(object):
+    '''Class representing a UV-coordinate for a 3ds file.'''
+    __slots__ = ('uv', )
+    def __init__(self, point):
+        self.uv = point
+
+    def __eq__(self, other):
+        # replaces the Python 2 __cmp__/cmp() pair, which Python 3 removed
+        return self.uv == other.uv
+
+    def get_size(self):
+        return 2*SZ_FLOAT
+
+    def write(self,file):
+        data=struct.pack('<2f', self.uv[0], self.uv[1])
+        file.write(data)
+
+    def __str__(self):
+        return '(%g, %g)' % self.uv
+
+class _3ds_rgb_color(object):
+    '''Class representing a (24-bit) rgb color for a 3ds file.'''
+    __slots__ = 'r','g','b'
+    def __init__(self, col):
+        self.r, self.g, self.b = col
+
+    def get_size(self):
+        return 3
+
+    def write(self,file):
+        file.write( struct.pack('<3B', int(255*self.r), int(255*self.g), int(255*self.b) ) )
+# 		file.write( struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) )
+
+    def __str__(self):
+        return '{%f, %f, %f}' % (self.r, self.g, self.b)
+
+class _3ds_face(object):
+    '''Class representing a face for a 3ds file.'''
+    __slots__ = ('vindex', )
+    def __init__(self, vindex):
+        self.vindex = vindex
+
+    def get_size(self):
+        return 4*SZ_SHORT
+
+    def write(self,file):
+        # The last zero is only used by 3d studio
+        file.write(struct.pack("<4H", self.vindex[0],self.vindex[1], self.vindex[2], 0))
+
+    def __str__(self):
+        return '[%d %d %d]' % (self.vindex[0],self.vindex[1], self.vindex[2])
+
+class _3ds_array(object):
+    '''Class representing an array of variables for a 3ds file.
+
+    Consists of a _3ds_short to indicate the number of items, followed by the items themselves.
+    '''
+    __slots__ = 'values', 'size'
+    def __init__(self):
+        self.values=[]
+        self.size=SZ_SHORT
+
+    # add an item:
+    def add(self,item):
+        self.values.append(item)
+        self.size+=item.get_size()
+
+    def get_size(self):
+        return self.size
+
+    def write(self,file):
+        _3ds_short(len(self.values)).write(file)
+        #_3ds_int(len(self.values)).write(file)
+        for value in self.values:
+            value.write(file)
+
+    # To not overwhelm the output in a dump, a _3ds_array only
+    # outputs the number of items, not all of the actual items.
+    def __str__(self):
+        return '(%d items)' % len(self.values)
+
+class _3ds_named_variable(object):
+    '''Convenience class for named variables.'''
+
+    __slots__ = 'value', 'name'
+    def __init__(self, name, val=None):
+        self.name=name
+        self.value=val
+
+    def get_size(self):
+        if self.value is None:
+            return 0
+        else:
+            return self.value.get_size()
+
+    def write(self, file):
+        if self.value is not None:
+            self.value.write(file)
+
+    def dump(self,indent):
+        if self.value is not None:
+            spaces=""
+            for i in range(indent):
+                spaces += "  "
+            if (self.name!=""):
+                print(spaces, self.name, " = ", self.value)
+            else:
+                print(spaces, "[unnamed]", " = ", self.value)
+
+
+#the chunk class
+class _3ds_chunk(object):
+    '''Class representing a chunk in a 3ds file.
+
+    Chunks contain zero or more variables, followed by zero or more subchunks.
+    '''
+    __slots__ = 'ID', 'size', 'variables', 'subchunks'
+    def __init__(self, id=0):
+        self.ID=_3ds_short(id)
+        self.size=_3ds_int(0)
+        self.variables=[]
+        self.subchunks=[]
+
+    def set_ID(self, id):
+        self.ID = _3ds_short(id)
+
+    def add_variable(self, name, var):
+        '''Add a named variable.
+
+        The name is mostly for debugging purposes.'''
+        self.variables.append(_3ds_named_variable(name,var))
+
+    def add_subchunk(self, chunk):
+        '''Add a subchunk.'''
+        self.subchunks.append(chunk)
+
+    def get_size(self):
+        '''Calculate the size of the chunk and return it.
+
+        The sizes of the variables and subchunks are used to determine this chunk\'s size.'''
+        tmpsize=self.ID.get_size()+self.size.get_size()
+        for variable in self.variables:
+            tmpsize+=variable.get_size()
+        for subchunk in self.subchunks:
+            tmpsize+=subchunk.get_size()
+        self.size.value=tmpsize
+        return self.size.value
+
+    def write(self, file):
+        '''Write the chunk to a file.
+
+        Uses the write function of the variables and the subchunks to do the actual work.'''
+        #write header
+        self.ID.write(file)
+        self.size.write(file)
+        for variable in self.variables:
+            variable.write(file)
+        for subchunk in self.subchunks:
+            subchunk.write(file)
+
+
+    def dump(self, indent=0):
+        '''Dump the chunk to standard output.
+
+        Dump is used for debugging purposes, to dump the contents of a chunk to the standard output.
+        Uses the dump function of the named variables and the subchunks to do the actual work.'''
+        spaces=""
+        for i in range(indent):
+            spaces += "  "
+        print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size())
+        for variable in self.variables:
+            variable.dump(indent+1)
+        for subchunk in self.subchunks:
+            subchunk.dump(indent+1)
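+
+# Minimal usage sketch of the chunk classes above (illustrative only):
+#
+#   vert_chunk = _3ds_chunk(OBJECT_VERTICES)
+#   vert_chunk.add_variable("vertices", _3ds_array())
+#   vert_chunk.get_size()                     # recomputes self.size; call before write()
+#   vert_chunk.write(open("out.3ds", "wb"))   # "out.3ds" is a hypothetical path
+#
+# save() below does this at full scale: build the PRIMARY tree, call
+# get_size() once on the root (it recurses), then write() it recursively.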
+
+
+
+######################################################
+# EXPORT
+######################################################
+
+def get_material_images(material):
+    # blender utility func.
+    if material:
+        return [s.texture.image for s in material.texture_slots if s and s.texture.type == 'IMAGE' and s.texture.image]
+
+    return []
+# 	images = []
+# 	if material:
+# 		for mtex in material.getTextures():
+# 			if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
+# 				image = mtex.tex.image
+# 				if image:
+# 					images.append(image) # maye want to include info like diffuse, spec here.
+# 	return images
+
+
+def make_material_subchunk(id, color):
+    '''Make a material subchunk.
+
+    Used for color subchunks, such as diffuse color or ambient color subchunks.'''
+    mat_sub = _3ds_chunk(id)
+    col1 = _3ds_chunk(RGB1)
+    col1.add_variable("color1", _3ds_rgb_color(color))
+    mat_sub.add_subchunk(col1)
+# optional:
+#	col2 = _3ds_chunk(RGB1)
+#	col2.add_variable("color2", _3ds_rgb_color(color))
+#	mat_sub.add_subchunk(col2)
+    return mat_sub
+
+
+def make_material_texture_chunk(id, images):
+    """Make Material Map texture chunk
+    """
+    mat_sub = _3ds_chunk(id)
+
+    def add_image(img):
+        import os
+        filename = os.path.basename(img.filepath)  # use the parameter, not the loop variable
+        mat_sub_file = _3ds_chunk(MATMAPFILE)
+        mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
+        mat_sub.add_subchunk(mat_sub_file)
+
+    for image in images:
+        add_image(image)
+
+    return mat_sub
+
+def make_material_chunk(material, image):
+    '''Make a material chunk out of a blender material.'''
+    material_chunk = _3ds_chunk(MATERIAL)
+    name = _3ds_chunk(MATNAME)
+
+    if material:	name_str = material.name
+    else:			name_str = 'None'
+    if image:	name_str += image.name
+
+    name.add_variable("name", _3ds_string(sane_name(name_str)))
+    material_chunk.add_subchunk(name)
+
+    if not material:
+        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0,0,0) ))
+        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8) ))
+        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1,1,1) ))
+
+    else:
+        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.ambient for a in material.diffuse_color] ))
+# 		material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.amb for a in material.rgbCol] ))
+        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color))
+# 		material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.rgbCol))
+        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color))
+# 		material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specCol))
+
+        images = get_material_images(material) # can be None
+        if image: images.append(image)
+
+        if images:
+            material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images))
+
+    return material_chunk
+
+class tri_wrapper(object):
+    '''Class representing a triangle.
+
+    Used when converting faces to triangles'''
+
+    __slots__ = 'vertex_index', 'mat', 'image', 'faceuvs', 'offset'
+    def __init__(self, vindex=(0,0,0), mat=None, image=None, faceuvs=None):
+        self.vertex_index= vindex
+        self.mat= mat
+        self.image= image
+        self.faceuvs= faceuvs
+        self.offset= [0, 0, 0] # offset indices
+
+
+def extract_triangles(mesh):
+    '''Extract triangles from a mesh.
+
+    If the mesh contains quads, they will be split into triangles.'''
+    tri_list = []
+    do_uv = len(mesh.uv_textures)
+# 	do_uv = mesh.faceUV
+
+# 	if not do_uv:
+# 		face_uv = None
+
+    img = None
+    for i, face in enumerate(mesh.faces):
+        f_v = face.vertices
+# 		f_v = face.v
+
+        uf = mesh.uv_textures.active.data[i] if do_uv else None
+
+        if do_uv:
+            f_uv = uf.uv
+            # f_uv =  (uf.uv1, uf.uv2, uf.uv3, uf.uv4) if face.vertices[3] else (uf.uv1, uf.uv2, uf.uv3)
+# 			f_uv = face.uv
+            img = uf.image if uf else None
+# 			img = face.image
+            if img: img = img.name
+
+        # if f_v[3] == 0:
+        if len(f_v)==3:
+            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
+# 			new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
+            if (do_uv): new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
+            tri_list.append(new_tri)
+
+        else: #it's a quad
+            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
+# 			new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
+            new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img)
+# 			new_tri_2 = tri_wrapper((f_v[0].index, f_v[2].index, f_v[3].index), face.mat, img)
+
+            if (do_uv):
+                new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
+                new_tri_2.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])
+
+            tri_list.append( new_tri )
+            tri_list.append( new_tri_2 )
+
+    return tri_list
+
+
+def remove_face_uv(verts, tri_list):
+    '''Remove face UV coordinates from a list of triangles.
+
+    Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
+    need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
+    there are multiple uv coordinates per vertex.'''
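+    # Worked example (illustrative): a vertex shared by faces with three distinct
+    # uvs is written to vert_array three times, once per unique uv, and each
+    # triangle's indices are remapped to the duplicate that carries its uv.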
+
+    # initialize a list of UniqueLists, one per vertex:
+    #uv_list = [UniqueList() for i in xrange(len(verts))]
+    unique_uvs= [{} for i in range(len(verts))]
+
+    # for each face uv coordinate, add it to the UniqueList of the vertex
+    for tri in tri_list:
+        for i in range(3):
+            # store the index into the UniqueList for future reference:
+            # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))
+
+            context_uv_vert= unique_uvs[tri.vertex_index[i]]
+            uvkey= tri.faceuvs[i]
+
+            offset_index__uv_3ds = context_uv_vert.get(uvkey)
+
+            if not offset_index__uv_3ds:
+                offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)
+
+            tri.offset[i] = offset_index__uv_3ds[0]
+
+
+
+    # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
+    # only once.
+
+    # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
+    # faces refer to the new face indices:
+    vert_index = 0
+    vert_array = _3ds_array()
+    uv_array = _3ds_array()
+    index_list = []
+    for i,vert in enumerate(verts):
+        index_list.append(vert_index)
+
+        pt = _3ds_point_3d(vert.co) # reuse, should be ok
+        uvmap = [None] * len(unique_uvs[i])
+        for ii, uv_3ds in unique_uvs[i].values():
+            # add a vertex duplicate to the vertex_array for every uv associated with this vertex:
+            vert_array.add(pt)
+            # add the uv coordinate to the uv array:
+            # This for loop does not give uv's ordered by ii, so we create a new map
+            # and add the uv's later
+            # uv_array.add(uv_3ds)
+            uvmap[ii] = uv_3ds
+
+        # Add the uv's in the correct order
+        for uv_3ds in uvmap:
+            # add the uv coordinate to the uv array:
+            uv_array.add(uv_3ds)
+
+        vert_index += len(unique_uvs[i])
+
+    # Make sure the triangle vertex indices now refer to the new vertex list:
+    for tri in tri_list:
+        for i in range(3):
+            tri.offset[i]+=index_list[tri.vertex_index[i]]
+        tri.vertex_index= tri.offset
+
+    return vert_array, uv_array, tri_list
+
+def make_faces_chunk(tri_list, mesh, materialDict):
+    '''Make a chunk for the faces.
+
+    Also adds subchunks assigning materials to all faces.'''
+
+    materials = mesh.materials
+    if not materials:
+        mat = None
+
+    face_chunk = _3ds_chunk(OBJECT_FACES)
+    face_list = _3ds_array()
+
+
+    if len(mesh.uv_textures):
+# 	if mesh.faceUV:
+        # Gather materials used in this mesh - mat/image pairs
+        unique_mats = {}
+        for i,tri in enumerate(tri_list):
+
+            face_list.add(_3ds_face(tri.vertex_index))
+
+            if materials:
+                mat = materials[tri.mat]
+                if mat: mat = mat.name
+
+            img = tri.image
+
+            try:
+                context_mat_face_array = unique_mats[mat, img][1]
+            except KeyError:
+
+                if mat:	name_str = mat
+                else:	name_str = 'None'
+                if img: name_str += img
+
+                context_mat_face_array = _3ds_array()
+                unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array
+
+
+            context_mat_face_array.add(_3ds_short(i))
+            # obj_material_faces[tri.mat].add(_3ds_short(i))
+
+        face_chunk.add_variable("faces", face_list)
+        for mat_name, mat_faces in unique_mats.values():
+            obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
+            obj_material_chunk.add_variable("name", mat_name)
+            obj_material_chunk.add_variable("face_list", mat_faces)
+            face_chunk.add_subchunk(obj_material_chunk)
+
+    else:
+
+        obj_material_faces=[]
+        obj_material_names=[]
+        for m in materials:
+            if m:
+                obj_material_names.append(_3ds_string(sane_name(m.name)))
+                obj_material_faces.append(_3ds_array())
+        n_materials = len(obj_material_names)
+
+        for i,tri in enumerate(tri_list):
+            face_list.add(_3ds_face(tri.vertex_index))
+            if (tri.mat < n_materials):
+                obj_material_faces[tri.mat].add(_3ds_short(i))
+
+        face_chunk.add_variable("faces", face_list)
+        for i in range(n_materials):
+            obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
+            obj_material_chunk.add_variable("name", obj_material_names[i])
+            obj_material_chunk.add_variable("face_list", obj_material_faces[i])
+            face_chunk.add_subchunk(obj_material_chunk)
+
+    return face_chunk
+
+def make_vert_chunk(vert_array):
+    '''Make a vertex chunk out of an array of vertices.'''
+    vert_chunk = _3ds_chunk(OBJECT_VERTICES)
+    vert_chunk.add_variable("vertices",vert_array)
+    return vert_chunk
+
+def make_uv_chunk(uv_array):
+    '''Make a UV chunk out of an array of UVs.'''
+    uv_chunk = _3ds_chunk(OBJECT_UV)
+    uv_chunk.add_variable("uv coords", uv_array)
+    return uv_chunk
+
+def make_mesh_chunk(mesh, materialDict):
+    '''Make a chunk out of a Blender mesh.'''
+
+    # Extract the triangles from the mesh:
+    tri_list = extract_triangles(mesh)
+
+    if len(mesh.uv_textures):
+# 	if mesh.faceUV:
+        # Remove the face UVs and convert it to vertex UV:
+        vert_array, uv_array, tri_list = remove_face_uv(mesh.vertices, tri_list)
+    else:
+        # Add the vertices to the vertex array:
+        vert_array = _3ds_array()
+        for vert in mesh.vertices:
+            vert_array.add(_3ds_point_3d(vert.co))
+        # If the mesh has vertex UVs, create an array of UVs:
+        if len(mesh.sticky):
+# 		if mesh.vertexUV:
+            uv_array = _3ds_array()
+            for uv in mesh.sticky:
+# 			for vert in mesh.vertices:
+                uv_array.add(_3ds_point_uv(uv.co))
+# 				uv_array.add(_3ds_point_uv(vert.uvco))
+        else:
+            # no UV at all:
+            uv_array = None
+
+    # create the chunk:
+    mesh_chunk = _3ds_chunk(OBJECT_MESH)
+
+    # add vertex chunk:
+    mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
+    # add faces chunk:
+
+    mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))
+
+    # if available, add uv chunk:
+    if uv_array:
+        mesh_chunk.add_subchunk(make_uv_chunk(uv_array))
+
+    return mesh_chunk
+
+""" # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+def make_kfdata(start=0, stop=0, curtime=0):
+    '''Make the basic keyframe data chunk'''
+    kfdata = _3ds_chunk(KFDATA)
+
+    kfhdr = _3ds_chunk(KFDATA_KFHDR)
+    kfhdr.add_variable("revision", _3ds_short(0))
+    # Not really sure what filename is used for, but it seems it is usually used
+    # to identify the program that generated the .3ds:
+    kfhdr.add_variable("filename", _3ds_string("Blender"))
+    kfhdr.add_variable("animlen", _3ds_int(stop-start))
+
+    kfseg = _3ds_chunk(KFDATA_KFSEG)
+    kfseg.add_variable("start", _3ds_int(start))
+    kfseg.add_variable("stop", _3ds_int(stop))
+
+    kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
+    kfcurtime.add_variable("curtime", _3ds_int(curtime))
+
+    kfdata.add_subchunk(kfhdr)
+    kfdata.add_subchunk(kfseg)
+    kfdata.add_subchunk(kfcurtime)
+    return kfdata
+"""
+
+"""
+def make_track_chunk(ID, obj):
+    '''Make a chunk for track data.
+
+    Depending on the ID, this will construct a position, rotation or scale track.'''
+    track_chunk = _3ds_chunk(ID)
+    track_chunk.add_variable("track_flags", _3ds_short())
+    track_chunk.add_variable("unknown", _3ds_int())
+    track_chunk.add_variable("unknown", _3ds_int())
+    track_chunk.add_variable("nkeys", _3ds_int(1))
+    # Next section should be repeated for every keyframe, but for now, animation is not actually supported.
+    track_chunk.add_variable("tcb_frame", _3ds_int(0))
+    track_chunk.add_variable("tcb_flags", _3ds_short())
+    if obj.type=='Empty':
+        if ID==POS_TRACK_TAG:
+            # position vector:
+            track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
+        elif ID==ROT_TRACK_TAG:
+            # rotation (quaternion, angle first, followed by axis):
+            q = obj.getEuler().to_quat()
+            track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
+        elif ID==SCL_TRACK_TAG:
+            # scale vector:
+            track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
+    else:
+        # meshes have their transformations applied before
+        # exporting, so write identity transforms here:
+        if ID==POS_TRACK_TAG:
+            # position vector:
+            track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
+        elif ID==ROT_TRACK_TAG:
+            # rotation (quaternion, angle first, followed by axis):
+            track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
+        elif ID==SCL_TRACK_TAG:
+            # scale vector:
+            track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
+
+    return track_chunk
+"""
+
+"""
+def make_kf_obj_node(obj, name_to_id):
+    '''Make a node chunk for a Blender object.
+
+    Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
+    Blender Empty objects are converted to dummy nodes.'''
+
+    name = obj.name
+    # main object node chunk:
+    kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
+    # chunk for the object id:
+    obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
+    # object id is from the name_to_id dictionary:
+    obj_id_chunk.add_variable("node_id", _3ds_short(name_to_id[name]))
+
+    # object node header:
+    obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
+    # object name:
+    if obj.type == 'Empty':
+        # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
+        # for their name (see below):
+        obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
+    else:
+        # Add the name:
+        obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
+    # Add Flag variables (not sure what they do):
+    obj_node_header_chunk.add_variable("flags1", _3ds_short(0))
+    obj_node_header_chunk.add_variable("flags2", _3ds_short(0))
+
+    # Check parent-child relationships:
+    parent = obj.parent
+    if (parent is None) or (parent.name not in name_to_id):
+        # If no parent, or the parents name is not in the name_to_id dictionary,
+        # parent id becomes -1:
+        obj_node_header_chunk.add_variable("parent", _3ds_short(-1))
+    else:
+        # Get the parent's id from the name_to_id dictionary:
+        obj_node_header_chunk.add_variable("parent", _3ds_short(name_to_id[parent.name]))
+
+    # Add pivot chunk:
+    obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
+    obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
+    kf_obj_node.add_subchunk(obj_pivot_chunk)
+
+    # add subchunks for object id and node header:
+    kf_obj_node.add_subchunk(obj_id_chunk)
+    kf_obj_node.add_subchunk(obj_node_header_chunk)
+
+    # Empty objects need to have an extra chunk for the instance name:
+    if obj.type == 'Empty':
+        obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
+        obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
+        kf_obj_node.add_subchunk(obj_instance_name_chunk)
+
+    # Add track chunks for position, rotation and scale:
+    kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
+    kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
+    kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
+
+    return kf_obj_node
+"""
+
+
+def save(operator, context, filepath=""):
+    '''Save the Blender scene to a 3ds file.'''
+    import bpy
+    import time
+    from io_utils import create_derived_objects, free_derived_objects
+
+    # Time the export
+    time1 = time.clock()
+#	Blender.Window.WaitCursor(1)
+
+    sce = context.scene
+
+    if bpy.ops.object.mode_set.poll():
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+    # Initialize the main chunk (primary):
+    primary = _3ds_chunk(PRIMARY)
+    # Add version chunk:
+    version_chunk = _3ds_chunk(VERSION)
+    version_chunk.add_variable("version", _3ds_int(3))
+    primary.add_subchunk(version_chunk)
+
+    # init main object info chunk:
+    object_info = _3ds_chunk(OBJECTINFO)
+
+    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+    # init main key frame data chunk:
+    kfdata = make_kfdata()
+    '''
+
+    # Get all the supported objects selected in this scene:
+    # ob_sel= list(sce.objects.context)
+    # mesh_objects = [ (ob, me) for ob in ob_sel   for me in (BPyMesh.getMeshFromObject(ob, None, True, False, sce),) if me ]
+    # empty_objects = [ ob for ob in ob_sel if ob.type == 'Empty' ]
+
+    # Make a list of all materials used in the selected meshes (use a dictionary,
+    # each material is added once):
+    materialDict = {}
+    mesh_objects = []
+    scene = context.scene
+    for ob in [ob for ob in scene.objects if ob.is_visible(scene)]:
+# 	for ob in sce.objects.context:
+
+        # get derived objects
+        free, derived = create_derived_objects(scene, ob)
+
+        if derived is None:
+            continue
+
+        for ob_derived, mat in derived:
+# 		for ob_derived, mat in getDerivedObjects(ob, False):
+
+            if ob.type not in ('MESH', 'CURVE', 'SURFACE', 'FONT', 'META'):
+                continue
+
+            data = ob_derived.create_mesh(scene, True, 'PREVIEW')
+# 			data = getMeshFromObject(ob_derived, None, True, False, sce)
+            if data:
+                data.transform(mat)
+# 				data.transform(mat, recalc_normals=False)
+                mesh_objects.append((ob_derived, data))
+                mat_ls = data.materials
+                mat_ls_len = len(mat_ls)
+
+                # get material/image tuples.
+                if len(data.uv_textures):
+# 				if data.faceUV:
+                    if not mat_ls:
+                        mat = mat_name = None
+
+                    for f, uf in zip(data.faces, data.uv_textures.active.data):
+                        if mat_ls:
+                            mat_index = f.material_index
+# 							mat_index = f.mat
+                            if mat_index >= mat_ls_len:
+                                mat_index = f.mat = 0
+                            mat = mat_ls[mat_index]
+                            if mat:	mat_name = mat.name
+                            else:	mat_name = None
+                        # else there already set to none
+
+                        img = uf.image
+# 						img = f.image
+                        if img:	img_name = img.name
+                        else:	img_name = None
+
+                        materialDict.setdefault((mat_name, img_name), (mat, img) )
+
+
+                else:
+                    for mat in mat_ls:
+                        if mat: # material may be None so check its not.
+                            materialDict.setdefault((mat.name, None), (mat, None) )
+
+                    # Why 0 Why!
+                    for f in data.faces:
+                        if f.material_index >= mat_ls_len:
+# 						if f.mat >= mat_ls_len:
+                            f.material_index = 0
+                            # f.mat = 0
+
+        if free:
+            free_derived_objects(ob)
+
+
+    # Make material chunks for all materials used in the meshes:
+    for mat_and_image in materialDict.values():
+        object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))
+
+    # Give all objects a unique ID and build a dictionary from object name to object id:
+    """
+    name_to_id = {}
+    for ob, data in mesh_objects:
+        name_to_id[ob.name]= len(name_to_id)
+    #for ob in empty_objects:
+    #	name_to_id[ob.name]= len(name_to_id)
+    """
+
+    # Create object chunks for all meshes:
+    i = 0
+    for ob, blender_mesh in mesh_objects:
+        # create a new object chunk
+        object_chunk = _3ds_chunk(OBJECT)
+
+        # set the object name
+        object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))
+
+        # make a mesh chunk out of the mesh:
+        object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict))
+        object_info.add_subchunk(object_chunk)
+
+        ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+        # make a kf object node for the object:
+        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
+        '''
+        if not blender_mesh.users:
+            bpy.data.meshes.remove(blender_mesh)
+# 		blender_mesh.vertices = None
+
+        i += 1
+
+    # Create chunks for all empties:
+    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+    for ob in empty_objects:
+        # Empties only require a kf object node:
+        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
+        pass
+    '''
+
+    # Add main object info chunk to primary chunk:
+    primary.add_subchunk(object_info)
+
+    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+    # Add main keyframe data chunk to primary chunk:
+    primary.add_subchunk(kfdata)
+    '''
+
+    # At this point, the chunk hierarchy is completely built.
+
+    # Check the size:
+    primary.get_size()
+    # Open the file for writing:
+    file = open(filepath, 'wb')
+
+    # Recursively write the chunks to file:
+    primary.write(file)
+
+    # Close the file:
+    file.close()
+
+    # Clear name mapping vars, could make locals too
+    name_unique[:] = []
+    name_mapping.clear()
+
+    # Debugging only: report the exporting time:
+# 	Blender.Window.WaitCursor(0)
+    print("3ds export time: %.2f" % (time.clock() - time1))
+
+    # Debugging only: dump the chunk hierarchy:
+    #primary.dump()
+    
+    return {'FINISHED'}
diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..26093078335b0a08bfb6f5a0f1e43671d48b1988
--- /dev/null
+++ b/io_scene_3ds/import_3ds.py
@@ -0,0 +1,894 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Bob Holcomb, Richard Lärkäng, Damien McGinnes, Campbell Barton, Mario Lapin, Dominique Lorre
+
+import os
+import time
+import struct
+
+from io_utils import load_image
+
+import bpy
+import mathutils
+
+BOUNDS_3DS = []
+
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will see
+#----- Primary Chunk, at the beginning of each file
+PRIMARY = 0x4D4D
+
+#------ Main Chunks
+OBJECTINFO   =     0x3D3D      #This gives the version of the mesh and is found right before the material and object information
+VERSION      =     0x0002      #This gives the version of the .3ds file
+EDITKEYFRAME=      0xB000      #This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL = 45055		#0xAFFF				// This stores the texture info
+OBJECT = 16384		#0x4000				// This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+#------ sub defines of MATERIAL_BLOCK
+MAT_NAME		=	0xA000	# This holds the material name
+MAT_AMBIENT		=	0xA010	# Ambient color of the object/material
+MAT_DIFFUSE		=	0xA020	# This holds the color of the object/material
+MAT_SPECULAR	=	0xA030	# Specular color of the object/material
+MAT_SHINESS		=	0xA040	# ??
+MAT_TRANSPARENCY=	0xA050	# Transparency value of material
+MAT_SELF_ILLUM	=	0xA080	# Self Illumination value of material
+MAT_WIRE		=	0xA085	# Only renders wireframe
+
+MAT_TEXTURE_MAP	=	0xA200	# This is a header for a new texture map
+MAT_SPECULAR_MAP=	0xA204	# This is a header for a new specular map
+MAT_OPACITY_MAP	=	0xA210	# This is a header for a new opacity map
+MAT_REFLECTION_MAP=	0xA220	# This is a header for a new reflection map
+MAT_BUMP_MAP	=	0xA230	# This is a header for a new bump map
+MAT_MAP_FILEPATH =  0xA300  # This holds the file name of the texture
+
+MAT_FLOAT_COLOR = 0x0010  #color defined as 3 floats
+MAT_24BIT_COLOR	= 0x0011  #color defined as 3 bytes
+
+#>------ sub defines of OBJECT
+OBJECT_MESH  =      0x4100      # This lets us know that we are reading a new object
+OBJECT_LAMP =      0x4600      # This lets us know we are reading a light object
+OBJECT_LAMP_SPOT = 0x4610		# The light is a spotlight.
+OBJECT_LAMP_OFF = 0x4620		# The light is off.
+OBJECT_LAMP_ATTENUATE = 0x4625
+OBJECT_LAMP_RAYSHADE = 0x4627
+OBJECT_LAMP_SHADOWED = 0x4630
+OBJECT_LAMP_LOCAL_SHADOW = 0x4640
+OBJECT_LAMP_LOCAL_SHADOW2 = 0x4641
+OBJECT_LAMP_SEE_CONE = 0x4650
+OBJECT_LAMP_SPOT_RECTANGULAR = 0x4651
+OBJECT_LAMP_SPOT_OVERSHOOT = 0x4652
+OBJECT_LAMP_SPOT_PROJECTOR = 0x4653
+OBJECT_LAMP_EXCLUDE = 0x4654
+OBJECT_LAMP_RANGE = 0x4655
+OBJECT_LAMP_ROLL = 0x4656
+OBJECT_LAMP_SPOT_ASPECT = 0x4657
+OBJECT_LAMP_RAY_BIAS = 0x4658
+OBJECT_LAMP_INNER_RANGE = 0x4659
+OBJECT_LAMP_OUTER_RANGE = 0x465A
+OBJECT_LAMP_MULTIPLIER = 0x465B
+OBJECT_LAMP_AMBIENT_LIGHT = 0x4680
+
+
+
+OBJECT_CAMERA=      0x4700      # This lets us know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES=   0x4720      # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES =   0x4110      # The objects vertices
+OBJECT_FACES    =   0x4120      # The objects faces
+OBJECT_MATERIAL =   0x4130      # This is found if the object has a material, either texture map or color
+OBJECT_UV       =   0x4140      # The UV texture coordinates
+OBJECT_TRANS_MATRIX  =   0x4160 # The Object Matrix
+
+#>------ sub defines of EDITKEYFRAME
+# ED_KEY_AMBIENT_NODE        =   0xB001
+ED_KEY_OBJECT_NODE         =   0xB002
+# ED_KEY_CAMERA_NODE         =   0xB003
+# ED_KEY_TARGET_NODE         =   0xB004
+# ED_KEY_LIGHT_NODE          =   0xB005
+# ED_KEY_L_TARGET_NODE       =   0xB006  
+# ED_KEY_SPOTLIGHT_NODE      =   0xB007
+#>------ sub defines of ED_KEY_OBJECT_NODE
+# EK_OB_KEYFRAME_SEG        =   0xB008
+# EK_OB_KEYFRAME_CURTIME    =   0xB009
+# EK_OB_KEYFRAME_HEADER     =   0xB00A
+EK_OB_NODE_HEADER         =   0xB010
+EK_OB_INSTANCE_NAME       =   0xB011
+# EK_OB_PRESCALE            =   0xB012
+# EK_OB_PIVOT               =   0xB013
+# EK_OB_BOUNDBOX            =   0xB014
+# EK_OB_MORPH_SMOOTH        =   0xB015
+EK_OB_POSITION_TRACK      =   0xB020
+EK_OB_ROTATION_TRACK      =   0xB021
+EK_OB_SCALE_TRACK         =   0xB022
+# EK_OB_CAMERA_FOV_TRACK =       0xB023
+# EK_OB_CAMERA_ROLL_TRACK   =   0xB024
+# EK_OB_COLOR_TRACK         =   0xB025
+# EK_OB_MORPH_TRACK         =   0xB026
+# EK_OB_HOTSPOT_TRACK       =   0xB027
+# EK_OB_FALLOF_TRACK        =   0xB028
+# EK_OB_HIDE_TRACK          =   0xB029
+# EK_OB_NODE_ID             =   0xB030
+
+ROOT_OBJECT         =   0xFFFF
+
+global scn
+scn = None
+global object_dictionary # dictionary for object hierarchy
+object_dictionary = {} 
+
+
+#the chunk class
+class chunk:
+    ID = 0
+    length = 0
+    bytes_read = 0
+
+    #we don't read in the bytes_read, we compute that
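+    # '<HI' = little-endian unsigned short ID + unsigned int length; a 3ds
+    # chunk's length field includes this 6-byte header.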
+    binary_format='<HI'
+
+    def __init__(self):
+        self.ID = 0
+        self.length = 0
+        self.bytes_read = 0
+
+    def dump(self):
+        print('ID: ', self.ID)
+        print('ID in hex: ', hex(self.ID))
+        print('length: ', self.length)
+        print('bytes_read: ', self.bytes_read)
+
+def read_chunk(file, chunk):
+    temp_data = file.read(struct.calcsize(chunk.binary_format))
+    data = struct.unpack(chunk.binary_format, temp_data)
+    chunk.ID = data[0]
+    chunk.length = data[1]
+    #update the bytes read function
+    chunk.bytes_read = 6
+
+    #if debugging
+    #chunk.dump()
+
+def read_string(file):
+    #read in the characters till we get a null character
+    s = b''
+    while True:
+        c = struct.unpack('<c', file.read(1))[0]
+        if c == b'\x00':
+            break
+        s += c
+        #print 'string: ',s
+
+    #remove the null character from the string
+# 	print("read string", s)
+    return str(s, "utf-8", "replace"), len(s) + 1
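+
+# Note: the returned length is len(s) + 1, so callers can advance a chunk's
+# bytes_read past the terminating NUL as well as the string itself.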
+
+######################################################
+# IMPORT
+######################################################
+def process_next_object_chunk(file, previous_chunk):
+    new_chunk = chunk()
+    temp_chunk = chunk()
+
+    while (previous_chunk.bytes_read < previous_chunk.length):
+        #read the next chunk
+        read_chunk(file, new_chunk)
+
+def skip_to_end(file, skip_chunk):
+    buffer_size = skip_chunk.length - skip_chunk.bytes_read
+    binary_format='%ic' % buffer_size
+    temp_data = file.read(struct.calcsize(binary_format))
+    skip_chunk.bytes_read += buffer_size
+
+
+def add_texture_to_material(image, texture, material, mapto):
+    #print('assigning %s to %s' % (texture, material))
+
+    if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
+        print('\tError:  Cannot map to "%s"\n\tassuming diffuse color. Modify material "%s" later.' % (mapto, material.name))
+        mapto = "COLOR"
+
+    if image:
+        texture.image = image
+
+    mtex = material.texture_slots.add()
+    mtex.texture = texture
+    mtex.texture_coords = 'UV'
+    mtex.use_map_color_diffuse = False
+
+    if mapto == 'COLOR':
+        mtex.use_map_color_diffuse = True
+    elif mapto == 'SPECULARITY':
+        mtex.use_map_specular = True
+    elif mapto == 'ALPHA':
+        mtex.use_map_alpha = True
+    elif mapto == 'NORMAL':
+        mtex.use_map_normal = True
+
+
+def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
+    #print previous_chunk.bytes_read, 'BYTES READ'
+    contextObName = None
+    contextLamp = [None, None] # object, Data
+    contextMaterial = None
+    contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity()
+    #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
+    contextMesh_vertls = None # flat array: (verts * 3)
+    contextMesh_facels = None
+    contextMeshMaterials = {} # matname:[face_idxs]
+    contextMeshUV = None # flat array (verts * 2)
+
+    TEXTURE_DICT = {}
+    MATDICT = {}
+# 	TEXMODE = Mesh.FaceModes['TEX']
+
+    # Localspace variable names, faster.
+    STRUCT_SIZE_1CHAR = struct.calcsize('c')
+    STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
+    STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
+    STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
+    STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
+    STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
+    STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
+    _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
+    # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
+    # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
+    # only init once
+    object_list = [] # for hierarchy
+    object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent
+    
+    def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
+        bmesh = bpy.data.meshes.new(contextObName)
+
+        if myContextMesh_facels is None:
+            myContextMesh_facels = []
+
+        if myContextMesh_vertls:
+
+            bmesh.vertices.add(len(myContextMesh_vertls)//3)
+            bmesh.faces.add(len(myContextMesh_facels))
+            bmesh.vertices.foreach_set("co", myContextMesh_vertls)
+            
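+            # faces.foreach_set("vertices_raw") treats a face whose 4th index is 0
+            # as a triangle, so rotate any triangle ending in vertex 0 into a safe order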
+            eekadoodle_faces = []
+            for v1, v2, v3 in myContextMesh_facels:
+                eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0])
+            bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces)
+            
+            if bmesh.faces and contextMeshUV:
+                bmesh.uv_textures.new()
+                uv_faces = bmesh.uv_textures.active.data[:]
+            else:
+                uv_faces = None
+
+            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials.items()):
+                if matName is None:
+                    bmat = None
+                    img = None  # avoid an unbound name when the material is None
+                else:
+                    bmat = MATDICT[matName][1]
+                    img = TEXTURE_DICT.get(bmat.name)
+
+                bmesh.materials.append(bmat) # can be None
+
+                if uv_faces and img:
+                    for fidx in faces:
+                        bmesh.faces[fidx].material_index = mat_idx
+                        uf = uv_faces[fidx]
+                        uf.image = img
+                        uf.use_image = True
+                else:
+                    for fidx in faces:
+                        bmesh.faces[fidx].material_index = mat_idx
+                
+            if uv_faces:
+                for fidx, uf in enumerate(uv_faces):
+                    face = myContextMesh_facels[fidx]
+                    v1, v2, v3 = face
+                    
+                    # eekadoodle
+                    if v3 == 0:
+                        v1, v2, v3 = v3, v1, v2
+                    
+                    uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2]
+                    uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2]
+                    uf.uv3 = contextMeshUV[v3 * 2:(v3 * 2) + 2]
+                    # always a tri
+
+        ob = bpy.data.objects.new(contextObName, bmesh)
+        object_dictionary[contextObName] = ob
+        SCN.objects.link(ob)
+        
+        '''
+        if contextMatrix_tx:
+            ob.setMatrix(contextMatrix_tx)
+        '''
+        
+        if contextMatrix_rot:
+            ob.matrix_local = contextMatrix_rot
+
+        importedObjects.append(ob)
+        bmesh.update()
+
+    #a spare chunk
+    new_chunk = chunk()
+    temp_chunk = chunk()
+
+    CreateBlenderObject = False
+
+    def read_float_color(temp_chunk):
+        temp_data = file.read(struct.calcsize('3f'))
+        temp_chunk.bytes_read += 12
+        return [float(col) for col in struct.unpack('<3f', temp_data)]
+
+    def read_byte_color(temp_chunk):
+        temp_data = file.read(struct.calcsize('3B'))
+        temp_chunk.bytes_read += 3
+        return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+
+    def read_texture(new_chunk, temp_chunk, name, mapto):
+        new_texture = bpy.data.textures.new(name, type='IMAGE')
+
+        img = None
+        while (new_chunk.bytes_read < new_chunk.length):
+            #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
+            read_chunk(file, temp_chunk)
+
+            if (temp_chunk.ID == MAT_MAP_FILEPATH):
+                texture_name, read_str_len = read_string(file)
+                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
+                new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed
+
+            else:
+                skip_to_end(file, temp_chunk)
+
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+        # add the map to the material in the right channel
+        if img:
+            add_texture_to_material(img, new_texture, contextMaterial, mapto)
+
+    dirname = os.path.dirname(file.name)
+
+    #loop through all the data for this chunk (previous chunk) and see what it is
+    while (previous_chunk.bytes_read < previous_chunk.length):
+        #print '\t', previous_chunk.bytes_read, 'keep going'
+        #read the next chunk
+        #print 'reading a chunk'
+        read_chunk(file, new_chunk)
+
+        #is it a Version chunk?
+        if (new_chunk.ID == VERSION):
+            #print 'if (new_chunk.ID == VERSION):'
+            #print 'found a VERSION chunk'
+            #read in the version of the file
+            #it's an unsigned int (I)
+            temp_data = file.read(struct.calcsize('I'))
+            version = struct.unpack('<I', temp_data)[0]
+            new_chunk.bytes_read += 4 #read the 4 bytes for the version number
+            #this loader works with version 3 and below, but may not with 4 and above
+            if (version > 3):
+                print('\tNon-Fatal Error:  Version greater than 3, may not load correctly: ', version)
+
+        #is it an object info chunk?
+        elif (new_chunk.ID == OBJECTINFO):
+            #print 'elif (new_chunk.ID == OBJECTINFO):'
+            # print 'found an OBJECTINFO chunk'
+            process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)
+
+            #keep track of how much we read in the main chunk
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+        #is it an object chunk?
+        elif (new_chunk.ID == OBJECT):
+
+            if CreateBlenderObject:
+                putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+                contextMesh_vertls = []; contextMesh_facels = []
+
+                ## prepare to receive the next object
+                contextMeshMaterials = {} # matname:[face_idxs]
+                contextMeshUV = None
+                #contextMesh.vertexUV = 1 # Make sticky coords.
+                # Reset matrix
+                contextMatrix_rot = None
+                #contextMatrix_tx = None
+
+            CreateBlenderObject = True
+            contextObName, read_str_len = read_string(file)
+            new_chunk.bytes_read += read_str_len
+
+        #is it a material chunk?
+        elif (new_chunk.ID == MATERIAL):
+
+# 			print("read material")
+
+            #print 'elif (new_chunk.ID == MATERIAL):'
+            contextMaterial = bpy.data.materials.new('Material')
+
+        elif (new_chunk.ID == MAT_NAME):
+            #print 'elif (new_chunk.ID == MAT_NAME):'
+            material_name, read_str_len = read_string(file)
+
+# 			print("material name", material_name)
+
+            #plus one for the null character that ended the string
+            new_chunk.bytes_read += read_str_len
+
+            contextMaterial.name = material_name.rstrip() # remove trailing whitespace
+            MATDICT[material_name] = (contextMaterial.name, contextMaterial)
+
+        elif (new_chunk.ID == MAT_AMBIENT):
+            #print 'elif (new_chunk.ID == MAT_AMBIENT):'
+            read_chunk(file, temp_chunk)
+            if (temp_chunk.ID == MAT_FLOAT_COLOR):
+                contextMaterial.mirror_color = read_float_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3f'))
+# 				temp_chunk.bytes_read += 12
+# 				contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+            elif (temp_chunk.ID == MAT_24BIT_COLOR):
+                contextMaterial.mirror_color = read_byte_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3B'))
+# 				temp_chunk.bytes_read += 3
+# 				contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+            else:
+                skip_to_end(file, temp_chunk)
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+        elif (new_chunk.ID == MAT_DIFFUSE):
+            #print 'elif (new_chunk.ID == MAT_DIFFUSE):'
+            read_chunk(file, temp_chunk)
+            if (temp_chunk.ID == MAT_FLOAT_COLOR):
+                contextMaterial.diffuse_color = read_float_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3f'))
+# 				temp_chunk.bytes_read += 12
+# 				contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+            elif (temp_chunk.ID == MAT_24BIT_COLOR):
+                contextMaterial.diffuse_color = read_byte_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3B'))
+# 				temp_chunk.bytes_read += 3
+# 				contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+            else:
+                skip_to_end(file, temp_chunk)
+
+# 			print("read material diffuse color", contextMaterial.diffuse_color)
+
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+        elif (new_chunk.ID == MAT_SPECULAR):
+            #print 'elif (new_chunk.ID == MAT_SPECULAR):'
+            read_chunk(file, temp_chunk)
+            if (temp_chunk.ID == MAT_FLOAT_COLOR):
+                contextMaterial.specular_color = read_float_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3f'))
+# 				temp_chunk.bytes_read += 12
+# 				contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+            elif (temp_chunk.ID == MAT_24BIT_COLOR):
+                contextMaterial.specular_color = read_byte_color(temp_chunk)
+# 				temp_data = file.read(struct.calcsize('3B'))
+# 				temp_chunk.bytes_read += 3
+# 				contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+            else:
+                skip_to_end(file, temp_chunk)
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+        elif (new_chunk.ID == MAT_TEXTURE_MAP):
+            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
+
+        elif (new_chunk.ID == MAT_SPECULAR_MAP):
+            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
+
+        elif (new_chunk.ID == MAT_OPACITY_MAP):
+            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
+
+        elif (new_chunk.ID == MAT_BUMP_MAP):
+            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
+
+        elif (new_chunk.ID == MAT_TRANSPARENCY):
+            #print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
+            read_chunk(file, temp_chunk)
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+
+            temp_chunk.bytes_read += 2
+            contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100)
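+            # e.g. a 3DS transparency of 25 (percent) becomes a Blender
+            # alpha of 1 - 25 / 100 = 0.75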
+            new_chunk.bytes_read += temp_chunk.bytes_read
+
+
+        elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.
+
+            temp_data = file.read(STRUCT_SIZE_3FLOAT)
+
+            x,y,z = struct.unpack('<3f', temp_data)
+            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+
+            # the lamp is not added to object_dictionary; an entry there would be confusing
+            contextLamp[1] = bpy.data.lamps.new("Lamp", 'POINT')
+            contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])
+
+            SCN.objects.link(ob)
+            importedObjects.append(contextLamp[0])
+
+            #print 'number of faces: ', num_faces
+            #print x,y,z
+            contextLamp[0].location = (x, y, z)
+# 			contextLamp[0].setLocation(x,y,z)
+
+            # Reset matrix
+            contextMatrix_rot = None
+            #contextMatrix_tx = None
+            #print contextLamp.name,
+
+        elif (new_chunk.ID == OBJECT_MESH):
+            # print 'Found an OBJECT_MESH chunk'
+            pass
+        elif (new_chunk.ID == OBJECT_VERTICES):
+            '''
+            Worldspace vertex locations
+            '''
+            # print 'elif (new_chunk.ID == OBJECT_VERTICES):'
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            num_verts = struct.unpack('<H', temp_data)[0]
+            new_chunk.bytes_read += 2
+
+            # print 'number of verts: ', num_verts
+            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
+            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
+            # dummyvert is not used atm!
+            
+            #print 'object verts: bytes read: ', new_chunk.bytes_read
+
+        elif (new_chunk.ID == OBJECT_FACES):
+            # print 'elif (new_chunk.ID == OBJECT_FACES):'
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            num_faces = struct.unpack('<H', temp_data)[0]
+            new_chunk.bytes_read += 2
+            #print 'number of faces: ', num_faces
+
+            # print '\ngetting a face'
+            temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
+            new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces #4 short ints x 2 bytes each
+            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
+            contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]
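+            # Each face is 4 shorts (v1, v2, v3, flags); the slice
+            # contextMesh_facels[i - 3:i] keeps only (v1, v2, v3) and drops
+            # the flags word, e.g. 8 shorts -> [(v1, v2, v3), (v4, v5, v6)].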
+
+        elif (new_chunk.ID == OBJECT_MATERIAL):
+            # print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
+            material_name, read_str_len = read_string(file)
+            new_chunk.bytes_read += read_str_len  # read_str_len includes the null terminator
+
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+
+            
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat
+
+            contextMeshMaterials[material_name]= struct.unpack("<%dH" % (num_faces_using_mat), temp_data)
+
+            #look up the material in all the materials
+
+        elif (new_chunk.ID == OBJECT_UV):
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            num_uv = struct.unpack('<H', temp_data)[0]
+            new_chunk.bytes_read += 2
+
+            temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
+            new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
+            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)
+
+        elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
+            # How do we know the matrix size? 64 == 4x4, 48 == 4x3
+            temp_data = file.read(STRUCT_SIZE_4x3MAT)
+            data = list( struct.unpack('<ffffffffffff', temp_data)  )
+            new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
+
+            contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \
+                                                  data[3:6] + [0], \
+                                                  data[6:9] + [0], \
+                                                  data[9:] + [1], \
+                                                  ))
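+            # The 12 floats are a row-major 4x3: three rows of rotation and
+            # scale plus a translation row; appending the 0, 0, 0, 1 column
+            # rebuilds a full 4x4, e.g. the 4x3 identity
+            # (1,0,0, 0,1,0, 0,0,1, 0,0,0) becomes the 4x4 identity.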
+
+        elif (new_chunk.ID == MAT_MAP_FILEPATH):
+            texture_name, read_str_len = read_string(file)
+            if contextMaterial.name not in TEXTURE_DICT:
+                TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
+
+            new_chunk.bytes_read += read_str_len  # read_str_len includes the null terminator
+        elif new_chunk.ID == EDITKEYFRAME:
+            pass
+
+        elif new_chunk.ID == ED_KEY_OBJECT_NODE: #another object is being processed
+            child = None
+
+        elif new_chunk.ID == EK_OB_NODE_HEADER:
+            object_name, read_str_len = read_string(file)
+            new_chunk.bytes_read += read_str_len
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            hierarchy = struct.unpack('<H', temp_data)[0]
+            new_chunk.bytes_read += 2
+
+            child = object_dictionary.get(object_name)
+
+            if child is None:
+                child = bpy.data.objects.new(object_name, None) # create an empty object
+                SCN.objects.link(child)
+
+            object_list.append(child)
+            object_parent.append(hierarchy)
+
+        elif new_chunk.ID == EK_OB_INSTANCE_NAME:
+            object_name, read_str_len = read_string(file)
+            child.name = object_name
+            object_dictionary[object_name] = child
+            new_chunk.bytes_read += read_str_len
+
+        elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            nkeys = struct.unpack('<H', temp_data)[0]
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+            for i in range(nkeys):
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+                nframe = struct.unpack('<H', temp_data)[0]
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+                temp_data = file.read(STRUCT_SIZE_3FLOAT)
+                loc = struct.unpack('<3f', temp_data)
+                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+                if nframe == 0:
+                    child.location = loc
+
+        elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            nkeys = struct.unpack('<H', temp_data)[0]
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+            for i in range(nkeys):
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+                nframe = struct.unpack('<H', temp_data)[0]
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+                temp_data = file.read(STRUCT_SIZE_4FLOAT)
+                rad,axis_x,axis_y,axis_z = struct.unpack('<4f', temp_data)
+                new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
+                if nframe == 0:
+                    child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()   # why negative?
+
+        elif new_chunk.ID == EK_OB_SCALE_TRACK: # scale
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            nkeys = struct.unpack('<H', temp_data)[0]
+            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+            for i in range(nkeys):
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+                nframe = struct.unpack('<H', temp_data)[0]
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+                temp_data = file.read(STRUCT_SIZE_3FLOAT)
+                sca = struct.unpack('<3f', temp_data)
+                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+                if nframe == 0:
+                    child.scale = sca
+
+        else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
+            # print 'skipping to end of this chunk'
+            #print("unknown chunk: "+hex(new_chunk.ID))
+            buffer_size = new_chunk.length - new_chunk.bytes_read
+            binary_format='%ic' % buffer_size
+            temp_data = file.read(struct.calcsize(binary_format))
+            new_chunk.bytes_read += buffer_size
+
+
+        #update the previous chunk bytes read
+        # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
+        # print previous_chunk.bytes_read, new_chunk.bytes_read
+        previous_chunk.bytes_read += new_chunk.bytes_read
+        ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read
+
+    # FINISHED LOOP
+    # There will be a number of objects still not added
+    if CreateBlenderObject:
+        putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+
+
+    # Assign parents to objects    
+    for ind, ob in enumerate(object_list):
+        parent = object_parent[ind]
+        if parent == ROOT_OBJECT:
+            ob.parent = None
+        else:
+            ob.parent = object_list[parent]
+
+
+def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, APPLY_MATRIX=True):
+    global SCN
+
+    # XXX
+# 	if BPyMessages.Error_NoFile(filepath):
+# 		return
+
+    print("importing 3DS: %r..." % (filepath), end="")
+
+    time1 = time.clock()
+# 	time1 = Blender.sys.time()
+
+    current_chunk = chunk()
+
+    file = open(filepath, 'rb')
+
+    #here we go!
+    # print 'reading the first chunk'
+    read_chunk(file, current_chunk)
+    if (current_chunk.ID!=PRIMARY):
+        print('\tFatal Error:  Not a valid 3ds file: %r' % filepath)
+        file.close()
+        return
+
+
+    # IMPORT_AS_INSTANCE = Blender.Draw.Create(0)
+# 	IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0)
+# 	IMAGE_SEARCH = Blender.Draw.Create(1)
+# 	APPLY_MATRIX = Blender.Draw.Create(0)
+
+    # Get USER Options
+# 	pup_block = [\
+# 	('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reaches the size constraint. Zero Disables.'),\
+# 	('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
+# 	('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\
+# 	#('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\
+# 	]
+
+# 	if PREF_UI:
+# 		if not Blender.Draw.PupBlock('Import 3DS...', pup_block):
+# 			return
+
+# 	Blender.Window.WaitCursor(1)
+
+# 	IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val
+# 	# IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val
+# 	IMAGE_SEARCH = IMAGE_SEARCH.val
+# 	APPLY_MATRIX = APPLY_MATRIX.val
+
+    if IMPORT_CONSTRAIN_BOUNDS:
+        BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30]
+    else:
+        BOUNDS_3DS[:]= []
+
+    ##IMAGE_SEARCH
+
+    # fixme, make unglobal; clear in case a previous import left entries
+    object_dictionary.clear()
+
+    scn = context.scene
+# 	scn = bpy.data.scenes.active
+    SCN = scn
+# 	SCN_OBJECTS = scn.objects
+# 	SCN_OBJECTS.selected = [] # de select all
+
+    importedObjects = [] # Fill this list with objects
+    process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)
+
+    # fixme, make unglobal
+    object_dictionary.clear()
+
+    # Link the objects into this scene.
+    # Layers = scn.Layers
+
+    # REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.
+
+    if APPLY_MATRIX:
+        for ob in importedObjects:
+            if ob.type == 'MESH':
+                me = ob.data
+                me.transform(ob.matrix_local.copy().invert())
+
+    # Done DUMMYVERT
+    """
+    if IMPORT_AS_INSTANCE:
+        name = filepath.split('\\')[-1].split('/')[-1]
+        # Create a group for this import.
+        group_scn = Scene.New(name)
+        for ob in importedObjects:
+            group_scn.link(ob) # dont worry about the layers
+
+        grp = Blender.Group.New(name)
+        grp.objects = importedObjects
+
+        grp_ob = Object.New('Empty', name)
+        grp_ob.enableDupGroup = True
+        grp_ob.DupGroup = grp
+        scn.link(grp_ob)
+        grp_ob.Layers = Layers
+        grp_ob.sel = 1
+    else:
+        # Select all imported objects.
+        for ob in importedObjects:
+            scn.link(ob)
+            ob.Layers = Layers
+            ob.sel = 1
+    """
+
+    if 0:
+# 	if IMPORT_CONSTRAIN_BOUNDS!=0.0:
+        # Set bounds from object bounding box
+        for ob in importedObjects:
+            if ob.type == 'MESH':
+# 			if ob.type=='Mesh':
+                ob.makeDisplayList() # Why doesn't this update the bounds?
+                for v in ob.getBoundBox():
+                    for i in (0,1,2):
+                        if v[i] < BOUNDS_3DS[i]:
+                            BOUNDS_3DS[i]= v[i] # min
+
+                        if v[i] > BOUNDS_3DS[i + 3]:
+                            BOUNDS_3DS[i + 3] = v[i] # max
+
+        # Get the max axis x/y/z
+        max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2])
+        # print max_axis
+        if max_axis < 1 << 30: # Should never be false but just make sure.
+
+            # Get a new scale factor if set as an option
+            SCALE = 1.0
+            while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
+                SCALE/=10
+
+            # SCALE Matrix
+            SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4)
+
+            for ob in importedObjects:
+                if ob.parent is None:
+                    ob.matrix_world =  ob.matrix_world * SCALE_MAT
+
+        # Done constraining to bounds.
+
+    # Done.
+    print(" done in %.4f sec." % (time.clock()-time1))
+    file.close()
+
+
+def load(operator, context, filepath="", constrain_size=0.0, use_image_search=True, use_apply_transform=True):
+    load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=constrain_size, IMAGE_SEARCH=use_image_search, APPLY_MATRIX=use_apply_transform)
+    return {'FINISHED'}
diff --git a/io_scene_fbx/__init__.py b/io_scene_fbx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0544e44239b7b7ab27aad7ced7e759a8028bce5
--- /dev/null
+++ b/io_scene_fbx/__init__.py
@@ -0,0 +1,121 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "Autodesk FBX format",
+    "author": "Campbell Barton",
+    "location": "File > Import-Export",
+    "description": "Import-Export FBX meshes, UV's, vertex colors, materials, textures, cameras and lamps",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/Autodesk_FBX",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "export_fbx" in locals():
+        imp.reload(export_fbx)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ExportHelper
+
+
+class ExportFBX(bpy.types.Operator, ExportHelper):
+    '''Selection to an ASCII Autodesk FBX'''
+    bl_idname = "export_scene.fbx"
+    bl_label = "Export FBX"
+    bl_options = {'PRESET'}
+
+    filename_ext = ".fbx"
+    filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
+
+    # List of operator properties, the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    EXP_OBS_SELECTED = BoolProperty(name="Selected Objects", description="Export selected objects on visible layers", default=True)
+# 	EXP_OBS_SCENE = BoolProperty(name="Scene Objects", description="Export all objects in this scene", default=True)
+    TX_SCALE = FloatProperty(name="Scale", description="Scale all data (Note! some importers don't support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0)
+    TX_XROT90 = BoolProperty(name="Rot X90", description="Rotate all objects 90 degrees about the X axis", default=True)
+    TX_YROT90 = BoolProperty(name="Rot Y90", description="Rotate all objects 90 degrees about the Y axis", default=False)
+    TX_ZROT90 = BoolProperty(name="Rot Z90", description="Rotate all objects 90 degrees about the Z axis", default=False)
+    EXP_EMPTY = BoolProperty(name="Empties", description="Export empty objects", default=True)
+    EXP_CAMERA = BoolProperty(name="Cameras", description="Export camera objects", default=True)
+    EXP_LAMP = BoolProperty(name="Lamps", description="Export lamp objects", default=True)
+    EXP_ARMATURE = BoolProperty(name="Armatures", description="Export armature objects", default=True)
+    EXP_MESH = BoolProperty(name="Meshes", description="Export mesh objects", default=True)
+    EXP_MESH_APPLY_MOD = BoolProperty(name="Modifiers", description="Apply modifiers to mesh objects", default=True)
+#    EXP_MESH_HQ_NORMALS = BoolProperty(name="HQ Normals", description="Generate high quality normals", default=True)
+    EXP_IMAGE_COPY = BoolProperty(name="Copy Image Files", description="Copy image files to the destination path", default=False)
+    # armature animation
+    ANIM_ENABLE = BoolProperty(name="Enable Animation", description="Export keyframe animation", default=True)
+    ANIM_OPTIMIZE = BoolProperty(name="Optimize Keyframes", description="Remove double keyframes", default=True)
+    ANIM_OPTIMIZE_PRECISSION = FloatProperty(name="Precision", description="Tolerence for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0)
+# 	ANIM_ACTION_ALL = BoolProperty(name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True)
+    ANIM_ACTION_ALL = BoolProperty(name="All Actions", description="Use all actions for armatures, if false, use current action", default=False)
+    # batch
+    BATCH_ENABLE = BoolProperty(name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False)
+    BATCH_GROUP = BoolProperty(name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False)
+    BATCH_OWN_DIR = BoolProperty(name="Own Dir", description="Create a dir for each exported file", default=True)
+    BATCH_FILE_PREFIX = StringProperty(name="Prefix", description="Prefix each file with this name", maxlen=1024, default="")
+
+    def execute(self, context):
+        import math
+        from mathutils import Matrix
+        if not self.filepath:
+            raise Exception("filepath not set")
+
+        mtx4_x90n = Matrix.Rotation(-math.pi / 2.0, 4, 'X')
+        mtx4_y90n = Matrix.Rotation(-math.pi / 2.0, 4, 'Y')
+        mtx4_z90n = Matrix.Rotation(-math.pi / 2.0, 4, 'Z')
+
+        GLOBAL_MATRIX = Matrix()
+        GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = self.TX_SCALE
+        if self.TX_XROT90:
+            GLOBAL_MATRIX = mtx4_x90n * GLOBAL_MATRIX
+        if self.TX_YROT90:
+            GLOBAL_MATRIX = mtx4_y90n * GLOBAL_MATRIX
+        if self.TX_ZROT90:
+            GLOBAL_MATRIX = mtx4_z90n * GLOBAL_MATRIX
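+        # A reading of the composition above (not FBX doctrine): scaling is
+        # applied first and each enabled 90 degree rotation pre-multiplies
+        # it, so with X and Z both on, the result is Rz(-90) * Rx(-90) * S.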
+
+        keywords = self.as_keywords(ignore=("TX_XROT90", "TX_YROT90", "TX_ZROT90", "TX_SCALE", "check_existing", "filter_glob"))
+        keywords["GLOBAL_MATRIX"] = GLOBAL_MATRIX
+
+        import io_scene_fbx.export_fbx
+        return io_scene_fbx.export_fbx.save(self, context, **keywords)
+
+
+def menu_func(self, context):
+    self.layout.operator(ExportFBX.bl_idname, text="Autodesk FBX (.fbx)")
+
+
+def register():
+    bpy.types.INFO_MT_file_export.append(menu_func)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_export.remove(menu_func)
+
+if __name__ == "__main__":
+    register()
diff --git a/io_scene_fbx/export_fbx.py b/io_scene_fbx/export_fbx.py
new file mode 100644
index 0000000000000000000000000000000000000000..40cac6ddb4dac72409f145e5db8a6c450f060225
--- /dev/null
+++ b/io_scene_fbx/export_fbx.py
@@ -0,0 +1,2910 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+
+"""
+This script is an exporter to the FBX file format.
+
+http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx
+"""
+
+import os
+import time
+import math  # math.pi
+import shutil  # for file copying
+
+import bpy
+from mathutils import Vector, Euler, Matrix
+
+
+# XXX not used anymore, images are copied one at a time
+def copy_images(dest_dir, textures):
+    if not dest_dir.endswith(os.sep):
+        dest_dir += os.sep
+
+    image_paths = set()
+    for tex in textures:
+        image_paths.add(bpy.path.abspath(tex.filepath))
+
+    # Now copy images
+    copyCount = 0
+    for image_path in image_paths:
+        if os.path.exists(image_path):
+            # Make a name for the target path.
+            dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
+            if not os.path.exists(dest_image_path):  # Image isn't already there
+                print("\tCopying %r > %r" % (image_path, dest_image_path))
+                try:
+                    shutil.copy(image_path, dest_image_path)
+                    copyCount += 1
+                except:
+                    print("\t\tWarning, file failed to copy, skipping.")
+
+    print('\tCopied %d images' % copyCount)
+
+
+# I guess FBX uses degrees instead of radians (Arystan).
+# Call this function just before writing to FBX.
+# 180 / math.pi == 57.295779513
+def tuple_rad_to_deg(eul):
+    return eul[0] * 57.295779513, eul[1] * 57.295779513, eul[2] * 57.295779513
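+# e.g. tuple_rad_to_deg((math.pi, 0.0, math.pi / 2.0)) gives approximately
+# (180.0, 0.0, 90.0), within float rounding of the truncated constant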
+
+# def strip_path(p):
+# 	return p.split('\\')[-1].split('/')[-1]
+
+# Used to add the scene name into the filepath without using odd chars
+sane_name_mapping_ob = {}
+sane_name_mapping_mat = {}
+sane_name_mapping_tex = {}
+sane_name_mapping_take = {}
+sane_name_mapping_group = {}
+
+# Make sure reserved names are not used
+sane_name_mapping_ob['Scene'] = 'Scene_'
+sane_name_mapping_ob['blend_root'] = 'blend_root_'
+
+
+def increment_string(t):
+    name = t
+    num = ''
+    while name and name[-1].isdigit():
+        num = name[-1] + num
+        name = name[:-1]
+    if num:
+        return '%s%d' % (name, int(num) + 1)
+    else:
+        return name + '_0'
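+# e.g. increment_string('Bone')   -> 'Bone_0'
+#      increment_string('Bone_0') -> 'Bone_1'
+#      increment_string('Bone9')  -> 'Bone10'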
+
+
+# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up.
+def sane_name(data, dct):
+    #if not data: return None
+
+    if type(data) == tuple:  # materials are paired up with images
+        data, other = data
+        use_other = True
+    else:
+        other = None
+        use_other = False
+
+    name = data.name if data else None
+    orig_name = name
+
+    if other:
+        orig_name_other = other.name
+        name = '%s #%s' % (name, orig_name_other)
+    else:
+        orig_name_other = None
+
+    # don't cache, only ever call once for each data type now,
+    # so as to avoid namespace collisions between types - like with objects <-> bones
+    #try:		return dct[name]
+    #except:		pass
+
+    if not name:
+        name = 'unnamed'  # blank string, ASKING FOR TROUBLE!
+    else:
+        name = bpy.path.clean_name(name)  # use our own
+
+    while name in iter(dct.values()):
+        name = increment_string(name)
+
+    if use_other:  # even if other is None - orig_name_other will be a string or None
+        dct[orig_name, orig_name_other] = name
+    else:
+        dct[orig_name] = name
+
+    return name
+
+
+def sane_obname(data):
+    return sane_name(data, sane_name_mapping_ob)
+
+
+def sane_matname(data):
+    return sane_name(data, sane_name_mapping_mat)
+
+
+def sane_texname(data):
+    return sane_name(data, sane_name_mapping_tex)
+
+
+def sane_takename(data):
+    return sane_name(data, sane_name_mapping_take)
+
+
+def sane_groupname(data):
+    return sane_name(data, sane_name_mapping_group)
+
+# def derived_paths(fname_orig, basepath, FORCE_CWD=False):
+# 	'''
+# 	fname_orig - blender path, can be relative
+# 	basepath - fname_rel will be relative to this
+# 	FORCE_CWD - dont use the basepath, just add a ./ to the filepath.
+# 		use when we know the file will be in the basepath.
+# 	'''
+# 	fname = bpy.path.abspath(fname_orig)
+# # 	fname = Blender.sys.expandpath(fname_orig)
+# 	fname_strip = os.path.basename(fname)
+# # 	fname_strip = strip_path(fname)
+# 	if FORCE_CWD:
+# 		fname_rel = '.' + os.sep + fname_strip
+# 	else:
+# 		fname_rel = bpy.path.relpath(fname, basepath)
+# # 		fname_rel = Blender.sys.relpath(fname, basepath)
+# 	if fname_rel.startswith('//'): fname_rel = '.' + os.sep + fname_rel[2:]
+# 	return fname, fname_strip, fname_rel
+
+
+def mat4x4str(mat):
+    return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([f for v in mat for f in v])
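+# Flattens a 4x4 mathutils.Matrix row by row into sixteen comma-separated
+# floats for the ASCII FBX, e.g. the identity Matrix() starts
+# '1.000...,0.000...,0.000...,0.000...,0.000...,1.000...,'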
+
+
+# XXX not used
+# duplicated in OBJ exporter
+def getVertsFromGroup(me, group_index):
+    ret = []
+
+    for i, v in enumerate(me.vertices):
+        for g in v.groups:
+            if g.group == group_index:
+                ret.append((i, g.weight))
+
+    return ret
+
+
+# ob must be OB_MESH
+def BPyMesh_meshWeight2List(ob, me):
+    ''' Takes a mesh and returns its group names and a list of lists, one list per vertex.
+    Each vert list is aligned with the group names and contains a float weight per group.
+    These 2 lists can be modified and then used with list2MeshWeight to apply the changes.
+    '''
+
+    # Collect the vertex group names.
+    groupNames = [g.name for g in ob.vertex_groups]
+    len_groupNames = len(groupNames)
+
+    if not len_groupNames:
+        # no groups? return a vert-aligned list of empty lists
+        return [[] for i in range(len(me.vertices))], []
+    else:
+        vWeightList = [[0.0] * len_groupNames for i in range(len(me.vertices))]
+
+    for i, v in enumerate(me.vertices):
+        for g in v.groups:
+            vWeightList[i][g.group] = g.weight
+
+    return groupNames, vWeightList
+
+
+def meshNormalizedWeights(ob, me):
+    try:  # account for old bad BPyMesh
+        groupNames, vWeightList = BPyMesh_meshWeight2List(ob, me)
+    except:
+        return [], []
+
+    if not groupNames:
+        return [], []
+
+    for i, vWeights in enumerate(vWeightList):
+        tot = 0.0
+        for w in vWeights:
+            tot += w
+
+        if tot:
+            for j, w in enumerate(vWeights):
+                vWeights[j] = w / tot
+
+    return groupNames, vWeightList
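+# e.g. a vertex weighted (2.0, 6.0) across two groups normalizes to
+# (0.25, 0.75); vertices whose total weight is zero are left as-is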
+
+header_comment = \
+'''; FBX 6.1.0 project file
+; Created by Blender FBX Exporter
+; for support mail: ideasman42@gmail.com
+; ----------------------------------------------------
+
+'''
+
+
+# This func can be called with just the filepath
+def save(operator, context, filepath="",
+        GLOBAL_MATRIX=None,
+        EXP_OBS_SELECTED=True,
+        EXP_MESH=True,
+        EXP_MESH_APPLY_MOD=True,
+        EXP_ARMATURE=True,
+        EXP_LAMP=True,
+        EXP_CAMERA=True,
+        EXP_EMPTY=True,
+        EXP_IMAGE_COPY=False,
+        ANIM_ENABLE=True,
+        ANIM_OPTIMIZE=True,
+        ANIM_OPTIMIZE_PRECISSION=6,
+        ANIM_ACTION_ALL=False,
+        BATCH_ENABLE=False,
+        BATCH_GROUP=True,
+        BATCH_FILE_PREFIX='',
+        BATCH_OWN_DIR=False
+    ):
+
+    #XXX, missing arg
+    batch_objects = None
+
+    # testing
+    mtx_x90 = Matrix.Rotation(math.pi / 2.0, 3, 'X')  # used
+    mtx4_z90 = Matrix.Rotation(math.pi / 2.0, 4, 'Z')
+
+    if GLOBAL_MATRIX is None:
+        GLOBAL_MATRIX = Matrix()
+
+    if bpy.ops.object.mode_set.poll():
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+    # ----------------- Batch support!
+    if BATCH_ENABLE:
+        fbxpath = filepath
+
+        # get the path component of filepath
+        tmp_exists = os.path.isdir(fbxpath)
+
+        if not tmp_exists:  # a file (or missing), we want a path
+            fbxpath = os.path.dirname(fbxpath)
+# 			while fbxpath and fbxpath[-1] not in ('/', '\\'):
+# 				fbxpath = fbxpath[:-1]
+            if not fbxpath:
+                # XXX
+                print('Error%t|Directory does not exist!')
+# 				Draw.PupMenu('Error%t|Directory does not exist!')
+                return
+
+            tmp_exists = os.path.isdir(fbxpath)
+
+        if not tmp_exists:
+            # XXX
+            print('Error%t|Directory does not exist!')
+# 			Draw.PupMenu('Error%t|Directory does not exist!')
+            return
+
+        if not fbxpath.endswith(os.sep):
+            fbxpath += os.sep
+        del tmp_exists
+
+        if BATCH_GROUP:
+            data_seq = bpy.data.groups
+        else:
+            data_seq = bpy.data.scenes
+
+        # call this function within a loop with BATCH_ENABLE == False
+        orig_sce = context.scene
+
+        new_fbxpath = fbxpath  # own dir option modifies, we need to keep an original
+        for data in data_seq:  # scene or group
+            newname = BATCH_FILE_PREFIX + bpy.path.clean_name(data.name)
+
+            if BATCH_OWN_DIR:
+                new_fbxpath = fbxpath + newname + os.sep
+                # path may already exist
+                # TODO - might exist but be a file. unlikely but should probably account for it.
+
+                if not os.path.exists(new_fbxpath):
+# 				if Blender.sys.exists(new_fbxpath) == 0:
+                    os.mkdir(new_fbxpath)
+
+            filepath = new_fbxpath + newname + '.fbx'
+
+            print('\nBatch exporting %s as...\n\t%r' % (data, filepath))
+
+            # XXX don't know what to do with this, probably do the same? (Arystan)
+            if BATCH_GROUP:  # group
+                # group, so objects update properly, add a dummy scene.
+                scene = bpy.data.scenes.new()
+                scene.Layers = (1 << 20) - 1
+                bpy.data.scenes.active = scene
+                for ob_base in data.objects:
+                    scene.objects.link(ob_base)
+
+                scene.update(1)
+
+                # TODO - BUMMER! Armatures not in the group wont animate the mesh
+
+            else:  # scene
+                data_seq.active = data
+
+            # Call self with modified args
+            # Don't pass batch options since we already used them
+            write(filepath, data.objects,
+                context,
+                False,
+                EXP_MESH,
+                EXP_MESH_APPLY_MOD,
+                EXP_ARMATURE,
+                EXP_LAMP,
+                EXP_CAMERA,
+                EXP_EMPTY,
+                EXP_IMAGE_COPY,
+                GLOBAL_MATRIX,
+                ANIM_ENABLE,
+                ANIM_OPTIMIZE,
+                ANIM_OPTIMIZE_PRECISSION,
+                ANIM_ACTION_ALL
+            )
+
+            if BATCH_GROUP:
+                # remove temp group scene
+                bpy.data.scenes.unlink(scene)
+
+        bpy.data.scenes.active = orig_sce
+
+        return  # so the script wont run after we have batch exported.
+
+    # end batch support
+
+    # Use this for working out paths relative to the export location
+    basepath = os.path.dirname(filepath) or '.'
+    basepath += os.sep
+# 	basepath = Blender.sys.dirname(filepath)
+
+    # ----------------------------------------------
+    # storage classes
+    class my_bone_class(object):
+        __slots__ = ("blenName",
+                     "blenBone",
+                     "blenMeshes",
+                     "restMatrix",
+                     "parent",
+                     "blenName",
+                     "fbxName",
+                     "fbxArm",
+                     "__pose_bone",
+                     "__anim_poselist")
+
+        def __init__(self, blenBone, fbxArm):
+
+            # This is so 2 armatures don't have naming conflicts since FBX bones use the object namespace
+            self.fbxName = sane_obname(blenBone)
+
+            self.blenName = blenBone.name
+            self.blenBone = blenBone
+            self.blenMeshes = {}					# fbxMeshObName : mesh
+            self.fbxArm = fbxArm
+            self.restMatrix = blenBone.matrix_local
+# 			self.restMatrix =		blenBone.matrix['ARMATURESPACE']
+
+            # not used yet
+            # self.restMatrixInv =	self.restMatrix.copy().invert()
+            # self.restMatrixLocal =	None # set later, need parent matrix
+
+            self.parent = None
+
+            # not public
+            pose = fbxArm.blenObject.pose
+            self.__pose_bone = pose.bones[self.blenName]
+
+            # store the pose matrices here, keyed by frame:
+            # {frame:posematrix, frame:posematrix, ...}
+            self.__anim_poselist = {}
+
+        '''
+        def calcRestMatrixLocal(self):
+            if self.parent:
+                self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert()
+            else:
+                self.restMatrixLocal = self.restMatrix.copy()
+        '''
+        def setPoseFrame(self, f):
+            # cache pose info here, frame must be set beforehand
+
+            # Didn't end up needing head or tail; if we do - here it is.
+            '''
+            self.__anim_poselist[f] = (\
+                self.__pose_bone.poseMatrix.copy(),\
+                self.__pose_bone.head.copy(),\
+                self.__pose_bone.tail.copy() )
+            '''
+
+            self.__anim_poselist[f] = self.__pose_bone.matrix.copy()
+
+        # get pose from frame.
+        def getPoseMatrix(self, f):  # ----------------------------------------------
+            return self.__anim_poselist[f]
+        '''
+        def getPoseHead(self, f):
+            #return self.__pose_bone.head.copy()
+            return self.__anim_poselist[f][1].copy()
+        def getPoseTail(self, f):
+            #return self.__pose_bone.tail.copy()
+            return self.__anim_poselist[f][2].copy()
+        '''
+        # end
+
+        def getAnimParRelMatrix(self, frame):
+            #arm_mat = self.fbxArm.matrixWorld
+            #arm_mat = self.fbxArm.parRelMatrix()
+            if not self.parent:
+                #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore
+                return self.getPoseMatrix(frame) * mtx4_z90
+            else:
+                #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat)))  *  (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert()
+                return (self.parent.getPoseMatrix(frame) * mtx4_z90).invert() * ((self.getPoseMatrix(frame)) * mtx4_z90)
+
+        # we need these because cameras and lamps have modified rotations
+        def getAnimParRelMatrixRot(self, frame):
+            return self.getAnimParRelMatrix(frame)
+
+        def flushAnimData(self):
+            self.__anim_poselist.clear()
+
+    class my_object_generic(object):
+        __slots__ = ("fbxName",
+                     "blenObject",
+                     "blenData",
+                     "origData",
+                     "blenTextures",
+                     "blenMaterials",
+                     "blenMaterialList",
+                     "blenAction",
+                     "blenActionList",
+                     "fbxGroupNames",
+                     "fbxParent",
+                     "fbxBoneParent",
+                     "fbxBones",
+                     "fbxArm",
+                     "matrixWorld",
+                     "__anim_poselist",
+                     )
+
+        # Other settings can be applied for each type - mesh, armature etc.
+        def __init__(self, ob, matrixWorld=None):
+            self.fbxName = sane_obname(ob)
+            self.blenObject = ob
+            self.fbxGroupNames = []
+            self.fbxParent = None  # set later on IF the parent is in the selection.
+            if matrixWorld:
+                self.matrixWorld = GLOBAL_MATRIX * matrixWorld
+            else:
+                self.matrixWorld = GLOBAL_MATRIX * ob.matrix_world
+
+            self.__anim_poselist = {}  # we should only access this
+
+        def parRelMatrix(self):
+            if self.fbxParent:
+                return self.fbxParent.matrixWorld.copy().invert() * self.matrixWorld
+            else:
+                return self.matrixWorld
+
+        def setPoseFrame(self, f, fake=False):
+            if fake:
+                # annoying, have to clear GLOBAL_MATRIX
+                self.__anim_poselist[f] = self.matrixWorld * GLOBAL_MATRIX.copy().invert()
+            else:
+                self.__anim_poselist[f] = self.blenObject.matrix_world.copy()
+
+        def getAnimParRelMatrix(self, frame):
+            if self.fbxParent:
+                #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX
+                return (GLOBAL_MATRIX * self.fbxParent.__anim_poselist[frame]).invert() * (GLOBAL_MATRIX * self.__anim_poselist[frame])
+            else:
+                return GLOBAL_MATRIX * self.__anim_poselist[frame]
+
+        def getAnimParRelMatrixRot(self, frame):
+            obj_type = self.blenObject.type
+            if self.fbxParent:
+                matrix_rot = ((GLOBAL_MATRIX * self.fbxParent.__anim_poselist[frame]).invert() * (GLOBAL_MATRIX * self.__anim_poselist[frame])).rotation_part()
+            else:
+                matrix_rot = (GLOBAL_MATRIX * self.__anim_poselist[frame]).rotation_part()
+
+            # Lamps need to be rotated
+            if obj_type == 'LAMP':
+                matrix_rot = matrix_rot * mtx_x90
+            elif obj_type == 'CAMERA':
+                y = Vector((0.0, 1.0, 0.0)) * matrix_rot
+                matrix_rot = Matrix.Rotation(math.pi / 2.0, 3, y) * matrix_rot
+
+            return matrix_rot
+
+    # ----------------------------------------------
+
+    print('\nFBX export starting... %r' % filepath)
+    start_time = time.clock()
+    try:
+        file = open(filepath, 'w', encoding='utf8')
+    except:
+        return False
+
+    scene = context.scene
+    world = scene.world
+
+    # ---------------------------- Write the header first
+    file.write(header_comment)
+    if time:
+        curtime = time.localtime()[0:6]
+    else:
+        curtime = (0, 0, 0, 0, 0, 0)
+    #
+    file.write(\
+'''FBXHeaderExtension:  {
+    FBXHeaderVersion: 1003
+    FBXVersion: 6100
+    CreationTimeStamp:  {
+        Version: 1000
+        Year: %.4i
+        Month: %.2i
+        Day: %.2i
+        Hour: %.2i
+        Minute: %.2i
+        Second: %.2i
+        Millisecond: 0
+    }
+    Creator: "FBX SDK/FBX Plugins build 20070228"
+    OtherFlags:  {
+        FlagPLE: 0
+    }
+}''' % (curtime))
+
+    file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime)
+    file.write('\nCreator: "Blender version %s"' % bpy.app.version_string)
+
+    pose_items = []  # list of (fbxName, matrix) to write pose data for, easier to collect allong the way
+
+    # --------------- funcs for exporting
+    def object_tx(ob, loc, matrix, matrix_mod=None):
+        '''
+        matrix_mod is so armature objects can modify their bone matrices
+        '''
+        if isinstance(ob, bpy.types.Bone):
+# 		if isinstance(ob, Blender.Types.BoneType):
+
+            # we know we have a matrix
+            # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod)
+            matrix = ob.matrix_local * mtx4_z90  # dont apply armature matrix anymore
+# 			matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
+
+            parent = ob.parent
+            if parent:
+                #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod)
+                par_matrix = parent.matrix_local * mtx4_z90  # dont apply armature matrix anymore
+# 				par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
+                matrix = par_matrix.copy().invert() * matrix
+
+            loc, rot, scale = matrix.decompose()
+            matrix_rot = rot.to_matrix()
+
+            loc = tuple(loc)
+            rot = tuple(rot.to_euler())  # quat -> euler
+            scale = tuple(scale)
+        else:
+            # This is bad because we need the parent relative matrix from the fbx parent (if we have one), don't use anymore
+            #if ob and not matrix: matrix = ob.matrix_world * GLOBAL_MATRIX
+            if ob and not matrix:
+                raise Exception("error: this should never happen!")
+
+            matrix_rot = matrix
+            #if matrix:
+            #	matrix = matrix_scale * matrix
+
+            if matrix:
+                loc, rot, scale = matrix.decompose()
+                matrix_rot = rot.to_matrix()
+
+                # Lamps need to be rotated
+                if ob and ob.type == 'LAMP':
+                    matrix_rot = matrix_rot * mtx_x90
+                elif ob and ob.type == 'CAMERA':
+                    y = Vector((0.0, 1.0, 0.0)) * matrix_rot
+                    matrix_rot = Matrix.Rotation(math.pi / 2.0, 3, y) * matrix_rot
+                # else do nothing.
+
+                loc = tuple(loc)
+                rot = tuple(matrix_rot.to_euler())
+                scale = tuple(scale)
+            else:
+                if not loc:
+                    loc = 0.0, 0.0, 0.0
+                scale = 1.0, 1.0, 1.0
+                rot = 0.0, 0.0, 0.0
+
+        return loc, rot, scale, matrix, matrix_rot
+
+    def write_object_tx(ob, loc, matrix, matrix_mod=None):
+        '''
+        We have loc to set the location of non-blender objects that have a location
+
+        matrix_mod is only used for bones at the moment
+        '''
+        loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod)
+
+        file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc)
+        file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple_rad_to_deg(rot))
+# 		file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot)
+        file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale)
+        return loc, rot, scale, matrix, matrix_rot
+
+    def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None):
+        # if the type is 0 its an empty otherwise its a mesh
+        # only difference at the moment is one has a color
+        file.write('''
+        Properties60:  {
+            Property: "QuaternionInterpolate", "bool", "",0
+            Property: "Visibility", "Visibility", "A+",1''')
+
+        loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod)
+
+        # Rotation order; note, for the FBX files I've loaded the normal order is 1,
+        # but we set it to zero.
+        # eEULER_XYZ = 0
+        # eEULER_XZY
+        # eEULER_YZX
+        # eEULER_YXZ
+        # eEULER_ZXY
+        # eEULER_ZYX
+
+        file.write('''
+            Property: "RotationOffset", "Vector3D", "",0,0,0
+            Property: "RotationPivot", "Vector3D", "",0,0,0
+            Property: "ScalingOffset", "Vector3D", "",0,0,0
+            Property: "ScalingPivot", "Vector3D", "",0,0,0
+            Property: "TranslationActive", "bool", "",0
+            Property: "TranslationMin", "Vector3D", "",0,0,0
+            Property: "TranslationMax", "Vector3D", "",0,0,0
+            Property: "TranslationMinX", "bool", "",0
+            Property: "TranslationMinY", "bool", "",0
+            Property: "TranslationMinZ", "bool", "",0
+            Property: "TranslationMaxX", "bool", "",0
+            Property: "TranslationMaxY", "bool", "",0
+            Property: "TranslationMaxZ", "bool", "",0
+            Property: "RotationOrder", "enum", "",0
+            Property: "RotationSpaceForLimitOnly", "bool", "",0
+            Property: "AxisLen", "double", "",10
+            Property: "PreRotation", "Vector3D", "",0,0,0
+            Property: "PostRotation", "Vector3D", "",0,0,0
+            Property: "RotationActive", "bool", "",0
+            Property: "RotationMin", "Vector3D", "",0,0,0
+            Property: "RotationMax", "Vector3D", "",0,0,0
+            Property: "RotationMinX", "bool", "",0
+            Property: "RotationMinY", "bool", "",0
+            Property: "RotationMinZ", "bool", "",0
+            Property: "RotationMaxX", "bool", "",0
+            Property: "RotationMaxY", "bool", "",0
+            Property: "RotationMaxZ", "bool", "",0
+            Property: "RotationStiffnessX", "double", "",0
+            Property: "RotationStiffnessY", "double", "",0
+            Property: "RotationStiffnessZ", "double", "",0
+            Property: "MinDampRangeX", "double", "",0
+            Property: "MinDampRangeY", "double", "",0
+            Property: "MinDampRangeZ", "double", "",0
+            Property: "MaxDampRangeX", "double", "",0
+            Property: "MaxDampRangeY", "double", "",0
+            Property: "MaxDampRangeZ", "double", "",0
+            Property: "MinDampStrengthX", "double", "",0
+            Property: "MinDampStrengthY", "double", "",0
+            Property: "MinDampStrengthZ", "double", "",0
+            Property: "MaxDampStrengthX", "double", "",0
+            Property: "MaxDampStrengthY", "double", "",0
+            Property: "MaxDampStrengthZ", "double", "",0
+            Property: "PreferedAngleX", "double", "",0
+            Property: "PreferedAngleY", "double", "",0
+            Property: "PreferedAngleZ", "double", "",0
+            Property: "InheritType", "enum", "",0
+            Property: "ScalingActive", "bool", "",0
+            Property: "ScalingMin", "Vector3D", "",1,1,1
+            Property: "ScalingMax", "Vector3D", "",1,1,1
+            Property: "ScalingMinX", "bool", "",0
+            Property: "ScalingMinY", "bool", "",0
+            Property: "ScalingMinZ", "bool", "",0
+            Property: "ScalingMaxX", "bool", "",0
+            Property: "ScalingMaxY", "bool", "",0
+            Property: "ScalingMaxZ", "bool", "",0
+            Property: "GeometricTranslation", "Vector3D", "",0,0,0
+            Property: "GeometricRotation", "Vector3D", "",0,0,0
+            Property: "GeometricScaling", "Vector3D", "",1,1,1
+            Property: "LookAtProperty", "object", ""
+            Property: "UpVectorProperty", "object", ""
+            Property: "Show", "bool", "",1
+            Property: "NegativePercentShapeSupport", "bool", "",1
+            Property: "DefaultAttributeIndex", "int", "",0''')
+        if ob and not isinstance(ob, bpy.types.Bone):
+            # Only mesh objects have color
+            file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+            file.write('\n\t\t\tProperty: "Size", "double", "",100')
+            file.write('\n\t\t\tProperty: "Look", "enum", "",1')
+
+        return loc, rot, scale, matrix, matrix_rot
+
+    # -------------------------------------------- Armatures
+    #def write_bone(bone, name, matrix_mod):
+    def write_bone(my_bone):
+        file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName)
+        file.write('\n\t\tVersion: 232')
+
+        #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3]
+        poseMatrix = write_object_props(my_bone.blenBone)[3]  # don't apply bone matrices anymore
+        pose_items.append((my_bone.fbxName, poseMatrix))
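+        # (fbxName, poseMatrix) pairs are collected here, presumably for the bind pose section written later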
+
+        # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+        file.write('\n\t\t\tProperty: "Size", "double", "",1')
+
+        #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length)
+
+        """
+        file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\
+            ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+        """
+
+        file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %
+                   (my_bone.blenBone.head_local - my_bone.blenBone.tail_local).length)
+# 			(my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length)
+
+        #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1')
+        file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8')
+        file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+        file.write('\n\t\t}')
+        file.write('\n\t\tMultiLayer: 0')
+        file.write('\n\t\tMultiTake: 1')
+        file.write('\n\t\tShading: Y')
+        file.write('\n\t\tCulling: "CullingOff"')
+        file.write('\n\t\tTypeFlags: "Skeleton"')
+        file.write('\n\t}')
+
+    def write_camera_switch():
+        file.write('''
+    Model: "Model::Camera Switcher", "CameraSwitcher" {
+        Version: 232''')
+
+        write_object_props()
+        file.write('''
+            Property: "Color", "Color", "A",0.8,0.8,0.8
+            Property: "Camera Index", "Integer", "A+",100
+        }
+        MultiLayer: 0
+        MultiTake: 1
+        Hidden: "True"
+        Shading: W
+        Culling: "CullingOff"
+        Version: 101
+        Name: "Model::Camera Switcher"
+        CameraId: 0
+        CameraName: 100
+        CameraIndexName:
+    }''')
+
+    def write_camera_dummy(name, loc, near, far, proj_type, up):
+        file.write('\n\tModel: "Model::%s", "Camera" {' % name)
+        file.write('\n\t\tVersion: 232')
+        write_object_props(None, loc)
+
+        file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+        file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+        file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40')
+        file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+        file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+        file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0')
+        file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0')
+        file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63')
+        file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+        file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+        file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+        file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+        file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+        file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
+        file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486')
+        file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+        file.write('\n\t\t\tProperty: "AspectW", "double", "",320')
+        file.write('\n\t\t\tProperty: "AspectH", "double", "",200')
+        file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1')
+        file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+        file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+        file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+        file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near)
+        file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far)
+        file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816')
+        file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612')
+        file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333')
+        file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+        file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4')
+        file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+        file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+        file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+        file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+        file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+        file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+        file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+        file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+        file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+        file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+        file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+        file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+        file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333')
+        file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+        file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+        file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+        file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+        file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type)
+        file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+        file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+        file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+        file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+        file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+        file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+        file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+        file.write('\n\t\t}')
+        file.write('\n\t\tMultiLayer: 0')
+        file.write('\n\t\tMultiTake: 0')
+        file.write('\n\t\tHidden: "True"')
+        file.write('\n\t\tShading: Y')
+        file.write('\n\t\tCulling: "CullingOff"')
+        file.write('\n\t\tTypeFlags: "Camera"')
+        file.write('\n\t\tGeometryVersion: 124')
+        file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+        file.write('\n\t\tUp: %i,%i,%i' % up)
+        file.write('\n\t\tLookAt: 0,0,0')
+        file.write('\n\t\tShowInfoOnMoving: 1')
+        file.write('\n\t\tShowAudio: 0')
+        file.write('\n\t\tAudioColor: 0,1,0')
+        file.write('\n\t\tCameraOrthoZoom: 1')
+        file.write('\n\t}')
+
+    def write_camera_default():
+        # Not strictly needed, but writing these producer cameras keeps the
+        # output matching the FBX converter's.
+        write_camera_dummy('Producer Perspective', (0, 71.3, 287.5), 10, 4000, 0, (0, 1, 0))
+        write_camera_dummy('Producer Top', (0, 4000, 0), 1, 30000, 1, (0, 0, -1))
+        write_camera_dummy('Producer Bottom', (0, -4000, 0), 1, 30000, 1, (0, 0, -1))
+        write_camera_dummy('Producer Front', (0, 0, 4000), 1, 30000, 1, (0, 1, 0))
+        write_camera_dummy('Producer Back', (0, 0, -4000), 1, 30000, 1, (0, 1, 0))
+        write_camera_dummy('Producer Right', (4000, 0, 0), 1, 30000, 1, (0, 1, 0))
+        write_camera_dummy('Producer Left', (-4000, 0, 0), 1, 30000, 1, (0, 1, 0))
+
+    def write_camera(my_cam):
+        '''
+        Write a blender camera
+        '''
+        render = scene.render
+        width = render.resolution_x
+        height = render.resolution_y
+        aspect = width / height
+
+        data = my_cam.blenObject.data
+
+        file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName)
+        file.write('\n\t\tVersion: 232')
+        loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix())
+
+        file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+        file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % math.degrees(data.angle))
+        file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+        file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+        # file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026')
+        file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x)  # not sure if this is in the correct units?
+        file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y)  # ditto
+        file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0')
+        file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+        file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+        file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+        file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+        file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+        file.write('\n\t\t\tProperty: "GateFit", "enum", "",2')
+        file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+        file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width)
+        file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height)
+
+        '''Camera aspect ratio modes.
+            0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant.
+            1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value.
+            2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels.
+            3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value.
+            4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value.
+
+        Definition at line 234 of file kfbxcamera.h. '''
+
+        file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2')
+
+        file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+        file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+        file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+        file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start)
+        file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end)
+        file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0')
+        file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0')
+        file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect)
+        file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+        file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0')
+        file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+        file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+        file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+        file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+        file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+        file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+        file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+        file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+        file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+        file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+        file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+        file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+        file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+        file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+        file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect)
+        file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+        file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+        file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+        file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+        file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0')
+        file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+        file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+        file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+        file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+        file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+        file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+        file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+
+        file.write('\n\t\t}')
+        file.write('\n\t\tMultiLayer: 0')
+        file.write('\n\t\tMultiTake: 0')
+        file.write('\n\t\tShading: Y')
+        file.write('\n\t\tCulling: "CullingOff"')
+        file.write('\n\t\tTypeFlags: "Camera"')
+        file.write('\n\t\tGeometryVersion: 124')
+        file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+        file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Vector((0.0, 1.0, 0.0)) * matrix_rot))
+        file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Vector((0.0, 0.0, -1.0)) * matrix_rot))
+
+        #file.write('\n\t\tUp: 0,0,0' )
+        #file.write('\n\t\tLookAt: 0,0,0' )
+
+        file.write('\n\t\tShowInfoOnMoving: 1')
+        file.write('\n\t\tShowAudio: 0')
+        file.write('\n\t\tAudioColor: 0,1,0')
+        file.write('\n\t\tCameraOrthoZoom: 1')
+        file.write('\n\t}')
+
+    def write_light(my_light):
+        light = my_light.blenObject.data
+        file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName)
+        file.write('\n\t\tVersion: 232')
+
+        write_object_props(my_light.blenObject, None, my_light.parRelMatrix())
+
+        # Several of these values are written twice - oh well, follow the SDK's output.
+
+        # Blender's light types map onto FBX's, we just need to make sure
+        # that every unsupported type falls back to a supported one
+        #ePOINT,
+        #eDIRECTIONAL
+        #eSPOT
+        light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4}
+        light_type = light_type_items[light.type]
+
+        if light_type > 2:
+            light_type = 1  # hemi and area lights become directional
+
+# 		mode = light.mode
+        if light.shadow_method == 'RAY_SHADOW' or light.shadow_method == 'BUFFER_SHADOW':
+# 		if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows:
+            do_shadow = 1
+        else:
+            do_shadow = 0
+
+        if light.use_only_shadow or (not light.use_diffuse and not light.use_specular):
+# 		if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular):
+            do_light = 0
+        else:
+            do_light = 1
+
+        scale = abs(GLOBAL_MATRIX.scale_part()[0])  # scale is always uniform in this case
+
+        file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+        file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+        file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+        file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1')
+        file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy * 100.0, 200.0)))  # clamp below 200
+        if light.type == 'SPOT':
+            file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % math.degrees(light.spot_size))
+        file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+        file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color))
+
+        file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy * 100.0, 200.0)))  # clamp below 200
+
+        file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+        file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+        file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light)
+        file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+        file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+        file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+        file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+        file.write('\n\t\t\tProperty: "DecayType", "enum", "",0')
+        file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance)
+        file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0')
+        file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0')
+        file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0')
+        file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0')
+        file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0')
+        file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0')
+        file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow)
+        file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1')
+        file.write('\n\t\t}')
+        file.write('\n\t\tMultiLayer: 0')
+        file.write('\n\t\tMultiTake: 0')
+        file.write('\n\t\tShading: Y')
+        file.write('\n\t\tCulling: "CullingOff"')
+        file.write('\n\t\tTypeFlags: "Light"')
+        file.write('\n\t\tGeometryVersion: 124')
+        file.write('\n\t}')
+
+    # matrixOnly is currently only passed in for the root matrix
+    def write_null(my_null=None, fbxName=None, matrixOnly=None):
+        # ob can be null
+        if not fbxName:
+            fbxName = my_null.fbxName
+
+        file.write('\n\tModel: "Model::%s", "Null" {' % fbxName)
+        file.write('\n\t\tVersion: 232')
+
+        # only use this for the root matrix at the moment
+        if matrixOnly:
+            poseMatrix = write_object_props(None, None, matrixOnly)[3]
+
+        else:  # all other Null's
+            if my_null:
+                poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3]
+            else:
+                poseMatrix = write_object_props()[3]
+
+        pose_items.append((fbxName, poseMatrix))
+
+        file.write('''
+        }
+        MultiLayer: 0
+        MultiTake: 1
+        Shading: Y
+        Culling: "CullingOff"
+        TypeFlags: "Null"
+    }''')
+
+    # Material Settings
+    if world:
+        world_amb = world.ambient_color[:]
+    else:
+        world_amb = 0.0, 0.0, 0.0  # default value
+
+    def write_material(matname, mat):
+        file.write('\n\tMaterial: "Material::%s", "" {' % matname)
+
+        # TODO: add more material properties.
+        if mat:
+            mat_cold = tuple(mat.diffuse_color)
+            mat_cols = tuple(mat.specular_color)
+            #mat_colm = tuple(mat.mirCol) # we wont use the mirror color
+            mat_colamb = world_amb
+
+            mat_dif = mat.diffuse_intensity
+            mat_amb = mat.ambient
+            mat_hard = (float(mat.specular_hardness) - 1.0) / 5.10
+            mat_spec = mat.specular_intensity / 2.0
+            mat_alpha = mat.alpha
+            mat_emit = mat.emit
+            mat_shadeless = mat.use_shadeless
+            if mat_shadeless:
+                mat_shader = 'Lambert'
+            else:
+                if mat.diffuse_shader == 'LAMBERT':
+                    mat_shader = 'Lambert'
+                else:
+                    mat_shader = 'Phong'
+        else:
+            mat_cols = mat_cold = 0.8, 0.8, 0.8
+            mat_colamb = 0.0, 0.0, 0.0
+            # mat_colm
+            mat_dif = 1.0
+            mat_amb = 0.5
+            mat_hard = 20.0
+            mat_spec = 0.2
+            mat_alpha = 1.0
+            mat_emit = 0.0
+            mat_shadeless = False
+            mat_shader = 'Phong'
+
+        file.write('\n\t\tVersion: 102')
+        file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower())
+        file.write('\n\t\tMultiLayer: 0')
+
+        file.write('\n\t\tProperties60:  {')
+        file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader)
+        file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0')
+        file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold)  # emit and diffuse color are he same in blender
+        file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit)
+
+        file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb)
+        file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb)
+        file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold)
+        file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif)
+        file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0')
+        file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1')
+        file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha))
+        if not mat_shadeless:
+            file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols)
+            file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec)
+            file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0')
+            file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0')
+            file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1')
+        file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0')
+        file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb)
+        file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold)
+        if not mat_shadeless:
+            file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols)
+            file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard)
+        file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha)
+        if not mat_shadeless:
+            file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0')
+
+        file.write('\n\t\t}')
+        file.write('\n\t}')
+
+    def copy_image(image):
+        fn = bpy.path.abspath(image.filepath)
+        fn_strip = os.path.basename(fn)
+
+        if EXP_IMAGE_COPY:
+            rel = fn_strip
+            fn_abs_dest = os.path.join(basepath, fn_strip)
+            if not os.path.exists(fn_abs_dest):
+                shutil.copy(fn, fn_abs_dest)
+        elif bpy.path.is_subdir(fn, basepath):
+            rel = os.path.relpath(fn, basepath)
+        else:
+            rel = fn
+
+        return (rel, fn_strip)
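+    # e.g. with basepath "/tmp/export" and an image at "/tmp/export/tex/a.png",
+    # copy_image() returns ("tex/a.png", "a.png") when EXP_IMAGE_COPY is disabled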
+
+    # tex is an Image (Arystan)
+    def write_video(texname, tex):
+        # Same as texture really!
+        file.write('\n\tVideo: "Video::%s", "Clip" {' % texname)
+
+        file.write('''
+        Type: "Clip"
+        Properties60:  {
+            Property: "FrameRate", "double", "",0
+            Property: "LastFrame", "int", "",0
+            Property: "Width", "int", "",0
+            Property: "Height", "int", "",0''')
+        if tex:
+            fname_rel, fname_strip = copy_image(tex)
+# 			fname, fname_strip, fname_rel = derived_paths(tex.filepath, basepath, EXP_IMAGE_COPY)
+        else:
+            fname = fname_strip = fname_rel = ''
+
+        file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip)
+
+        file.write('''
+            Property: "StartFrame", "int", "",0
+            Property: "StopFrame", "int", "",0
+            Property: "PlaySpeed", "double", "",1
+            Property: "Offset", "KTime", "",0
+            Property: "InterlaceMode", "enum", "",0
+            Property: "FreeRunning", "bool", "",0
+            Property: "Loop", "bool", "",0
+            Property: "AccessMode", "enum", "",0
+        }
+        UseMipMap: 0''')
+
+        file.write('\n\t\tFilename: "%s"' % fname_strip)
+        if fname_strip:
+            fname_strip = '/' + fname_strip
+        file.write('\n\t\tRelativeFilename: "%s"' % fname_rel)  # make relative
+        file.write('\n\t}')
+
+    def write_texture(texname, tex, num):
+        # if tex is None then this is a dummy tex
+        file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname)
+        file.write('\n\t\tType: "TextureVideoClip"')
+        file.write('\n\t\tVersion: 202')
+        # TODO, rare case _empty_ exists as a name.
+        file.write('\n\t\tTextureName: "Texture::%s"' % texname)
+
+        file.write('''
+        Properties60:  {
+            Property: "Translation", "Vector", "A+",0,0,0
+            Property: "Rotation", "Vector", "A+",0,0,0
+            Property: "Scaling", "Vector", "A+",1,1,1''')
+        file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num)
+
+        # WrapModeU/V: 0 == repeat, 1 == clamp (mapped from use_clamp below)
+        file.write('''
+            Property: "TextureTypeUse", "enum", "",0
+            Property: "CurrentTextureBlendMode", "enum", "",1
+            Property: "UseMaterial", "bool", "",0
+            Property: "UseMipMap", "bool", "",0
+            Property: "CurrentMappingType", "enum", "",0
+            Property: "UVSwap", "bool", "",0''')
+
+        file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.use_clamp_x)
+        file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.use_clamp_y)
+
+        file.write('''
+            Property: "TextureRotationPivot", "Vector3D", "",0,0,0
+            Property: "TextureScalingPivot", "Vector3D", "",0,0,0
+            Property: "VideoProperty", "object", ""
+        }''')
+
+        file.write('\n\t\tMedia: "Video::%s"' % texname)
+
+        if tex:
+            fname_rel, fname_strip = copy_image(tex)
+# 			fname, fname_strip, fname_rel = derived_paths(tex.filepath, basepath, EXP_IMAGE_COPY)
+        else:
+            fname = fname_strip = fname_rel = ''
+
+        file.write('\n\t\tFileName: "%s"' % fname_strip)
+        file.write('\n\t\tRelativeFilename: "%s"' % fname_rel)  # need some make relative command
+
+        file.write('''
+        ModelUVTranslation: 0,0
+        ModelUVScaling: 1,1
+        Texture_Alpha_Source: "None"
+        Cropping: 0,0,0,0
+    }''')
+
+    def write_deformer_skin(obname):
+        '''
+        Each mesh has its own deformer
+        '''
+        file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname)
+        file.write('''
+        Version: 100
+        MultiLayer: 0
+        Type: "Skin"
+        Properties60:  {
+        }
+        Link_DeformAcuracy: 50
+    }''')
+
+    # in the example was 'Bip01 L Thigh_2'
+    def write_sub_deformer_skin(my_mesh, my_bone, weights):
+
+        '''
+        Each sub-deformer is specific to a mesh, but the bone it links to can be used by many sub-deformers,
+        so the SubDeformer needs the mesh-object name as a prefix to make it unique.
+
+        It's possible that there is no matching vgroup in this mesh; in that case no verts are in the sub-deformer,
+        a bit silly but it doesn't really matter.
+        '''
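+        # `weights` is assumed to be a pair: weights[0] is the list of vertex
+        # group names and weights[1] holds, per vertex, one weight per group
+        # (see the lookup below)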
+        file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName))
+
+        file.write('''
+        Version: 100
+        MultiLayer: 0
+        Type: "Cluster"
+        Properties60:  {
+            Property: "SrcModel", "object", ""
+            Property: "SrcModelReference", "object", ""
+        }
+        UserData: "", ""''')
+
+        # Support for bone parents
+        if my_mesh.fbxBoneParent:
+            if my_mesh.fbxBoneParent == my_bone:
+                # TODO - this is a bit lazy, we could have a simple write loop
+                # for this case because all weights are 1.0 but for now this is ok.
+                # Parent bones aren't used all that much anyway.
+                vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.vertices))]
+            else:
+                # This bone is not a parent of this mesh object, no weights
+                vgroup_data = []
+
+        else:
+            # Normal weight painted mesh
+            if my_bone.blenName in weights[0]:
+                # Before, we used a normalized weight list:
+                #vgroup_data = me.getVertsFromGroup(bone.name, 1)
+                group_index = weights[0].index(my_bone.blenName)
+                vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]]
+            else:
+                vgroup_data = []
+
+        file.write('\n\t\tIndexes: ')
+
+        i = -1
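+        # i == -1 marks "nothing written yet"; after the first value, entries
+        # are comma separated with a line break roughly every 24 values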
+        for vg in vgroup_data:
+            if i == -1:
+                file.write('%i' % vg[0])
+                i = 0
+            else:
+                if i == 23:
+                    file.write('\n\t\t')
+                    i = 0
+                file.write(',%i' % vg[0])
+            i += 1
+
+        file.write('\n\t\tWeights: ')
+        i = -1
+        for vg in vgroup_data:
+            if i == -1:
+                file.write('%.8f' % vg[1])
+                i = 0
+            else:
+                if i == 38:
+                    file.write('\n\t\t')
+                    i = 0
+                file.write(',%.8f' % vg[1])
+            i += 1
+
+        if my_mesh.fbxParent:
+            # TODO FIXME: this is broken in some cases. Skinned meshes just shouldn't have parents where possible!
+            m = (my_mesh.matrixWorld.copy().invert() * my_bone.fbxArm.matrixWorld.copy() * my_bone.restMatrix) * mtx4_z90
+        else:
+            # Yes! this is it... - but doesn't work when the mesh is a.
+            m = (my_mesh.matrixWorld.copy().invert() * my_bone.fbxArm.matrixWorld.copy() * my_bone.restMatrix) * mtx4_z90
+
+        #m = mtx4_z90 * my_bone.restMatrix
+        matstr = mat4x4str(m)
+        matstr_i = mat4x4str(m.invert())
+
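+        # TransformLink appears to hold the bone's world-space rest matrix,
+        # with Transform as its inverse (the mesh-to-bone binding pose)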
+        file.write('\n\t\tTransform: %s' % matstr_i)  # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/
+        file.write('\n\t\tTransformLink: %s' % matstr)
+        file.write('\n\t}')
+
+    def write_mesh(my_mesh):
+
+        me = my_mesh.blenData
+
+        # if there are non NULL materials on this mesh
+        do_materials = bool(my_mesh.blenMaterials)
+        do_textures = bool(my_mesh.blenTextures)
+        do_uvs = bool(me.uv_textures)
+
+        file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName)
+        file.write('\n\t\tVersion: 232')  # newline is added in write_object_props
+
+        # convert into lists once.
+        me_vertices = me.vertices[:]
+        me_edges = me.edges[:]
+        me_faces = me.faces[:]
+
+        poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3]
+        pose_items.append((my_mesh.fbxName, poseMatrix))
+
+        file.write('\n\t\t}')
+        file.write('\n\t\tMultiLayer: 0')
+        file.write('\n\t\tMultiTake: 1')
+        file.write('\n\t\tShading: Y')
+        file.write('\n\t\tCulling: "CullingOff"')
+
+        # Write the Real Mesh data here
+        file.write('\n\t\tVertices: ')
+        i = -1
+
+        for v in me_vertices:
+            if i == -1:
+                file.write('%.6f,%.6f,%.6f' % v.co[:])
+                i = 0
+            else:
+                if i == 7:
+                    file.write('\n\t\t')
+                    i = 0
+                file.write(',%.6f,%.6f,%.6f' % v.co[:])
+            i += 1
+
+        file.write('\n\t\tPolygonVertexIndex: ')
+        i = -1
+        for f in me_faces:
+            fi = f.vertices[:]
+
+            # the last index of each face is XOR'd with -1 to mark the end of the face
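+            # e.g. a triangle (0, 1, 2) is written as "0,1,-3" since
+            # 2 ^ -1 == -3; a reader applies the same XOR to recover the index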
+            if i == -1:
+                if len(fi) == 3:
+                    file.write('%i,%i,%i' % (fi[0], fi[1], fi[2] ^ -1))
+                else:
+                    file.write('%i,%i,%i,%i' % (fi[0], fi[1], fi[2], fi[3] ^ -1))
+                i = 0
+            else:
+                if i == 13:
+                    file.write('\n\t\t')
+                    i = 0
+                if len(fi) == 3:
+                    file.write(',%i,%i,%i' % (fi[0], fi[1], fi[2] ^ -1))
+                else:
+                    file.write(',%i,%i,%i,%i' % (fi[0], fi[1], fi[2], fi[3] ^ -1))
+            i += 1
+
+        # write loose edges as faces.
+        for ed in me_edges:
+            if ed.is_loose:
+                ed_val = ed.vertices[:]
+                ed_val = ed_val[0], ed_val[-1] ^ -1
+
+                if i == -1:
+                    file.write('%i,%i' % ed_val)
+                    i = 0
+                else:
+                    if i == 13:
+                        file.write('\n\t\t')
+                        i = 0
+                    file.write(',%i,%i' % ed_val)
+                i += 1  # only count values actually written
+
+        file.write('\n\t\tEdges: ')
+        i = -1
+        for ed in me_edges:
+            if i == -1:
+                file.write('%i,%i' % (ed.vertices[0], ed.vertices[1]))
+                i = 0
+            else:
+                if i == 13:
+                    file.write('\n\t\t')
+                    i = 0
+                file.write(',%i,%i' % (ed.vertices[0], ed.vertices[1]))
+            i += 1
+
+        file.write('\n\t\tGeometryVersion: 124')
+
+        file.write('''
+        LayerElementNormal: 0 {
+            Version: 101
+            Name: ""
+            MappingInformationType: "ByVertice"
+            ReferenceInformationType: "Direct"
+            Normals: ''')
+
+        i = -1
+        for v in me_vertices:
+            if i == -1:
+                file.write('%.15f,%.15f,%.15f' % v.normal[:])
+                i = 0
+            else:
+                if i == 2:
+                    file.write('\n\t\t\t ')
+                    i = 0
+                file.write(',%.15f,%.15f,%.15f' % v.normal[:])
+            i += 1
+        file.write('\n\t\t}')
+
+        # Write Face Smoothing
+        file.write('''
+        LayerElementSmoothing: 0 {
+            Version: 102
+            Name: ""
+            MappingInformationType: "ByPolygon"
+            ReferenceInformationType: "Direct"
+            Smoothing: ''')
+
+        i = -1
+        for f in me_faces:
+            if i == -1:
+                file.write('%i' % f.use_smooth)
+                i = 0
+            else:
+                if i == 54:
+                    file.write('\n\t\t\t ')
+                    i = 0
+                file.write(',%i' % f.use_smooth)
+            i += 1
+
+        file.write('\n\t\t}')
+
+        # Write Edge Smoothing
+        file.write('''
+        LayerElementSmoothing: 0 {
+            Version: 101
+            Name: ""
+            MappingInformationType: "ByEdge"
+            ReferenceInformationType: "Direct"
+            Smoothing: ''')
+
+        i = -1
+        for ed in me_edges:
+            if i == -1:
+                file.write('%i' % (ed.use_edge_sharp))
+                i = 0
+            else:
+                if i == 54:
+                    file.write('\n\t\t\t ')
+                    i = 0
+                file.write(',%i' % (ed.use_edge_sharp))
+            i += 1
+
+        file.write('\n\t\t}')
+
+        # Write VertexColor Layers
+        # note, no programs seem to use this info :/
+        collayers = []
+        if len(me.vertex_colors):
+            collayers = me.vertex_colors
+            for colindex, collayer in enumerate(collayers):
+                file.write('\n\t\tLayerElementColor: %i {' % colindex)
+                file.write('\n\t\t\tVersion: 101')
+                file.write('\n\t\t\tName: "%s"' % collayer.name)
+
+                file.write('''
+            MappingInformationType: "ByPolygonVertex"
+            ReferenceInformationType: "IndexToDirect"
+            Colors: ''')
+
+                i = -1
+                ii = 0  # Count how many Colors we write
+
+                for fi, cf in enumerate(collayer.data):
+                    if len(me_faces[fi].vertices) == 4:
+                        colors = cf.color1[:], cf.color2[:], cf.color3[:], cf.color4[:]
+                    else:
+                        colors = cf.color1[:], cf.color2[:], cf.color3[:]
+
+                    for col in colors:
+                        if i == -1:
+                            file.write('%.4f,%.4f,%.4f,1' % col)
+                            i = 0
+                        else:
+                            if i == 7:
+                                file.write('\n\t\t\t\t')
+                                i = 0
+                            file.write(',%.4f,%.4f,%.4f,1' % col)
+                        i += 1
+                        ii += 1  # One more Color
+
+                file.write('\n\t\t\tColorIndex: ')
+                i = -1
+                for j in range(ii):
+                    if i == -1:
+                        file.write('%i' % j)
+                        i = 0
+                    else:
+                        if i == 55:
+                            file.write('\n\t\t\t\t')
+                            i = 0
+                        file.write(',%i' % j)
+                    i += 1
+
+                file.write('\n\t\t}')
+
+        # Write UV and texture layers.
+        uvlayers = []
+        if do_uvs:
+            uvlayers = me.uv_textures
+            uvlayer_orig = me.uv_textures.active
+            for uvindex, uvlayer in enumerate(me.uv_textures):
+                file.write('\n\t\tLayerElementUV: %i {' % uvindex)
+                file.write('\n\t\t\tVersion: 101')
+                file.write('\n\t\t\tName: "%s"' % uvlayer.name)
+
+                file.write('''
+            MappingInformationType: "ByPolygonVertex"
+            ReferenceInformationType: "IndexToDirect"
+            UV: ''')
+
+                i = -1
+                ii = 0  # Count how many UVs we write
+
+                for uf in uvlayer.data:
+                    # workaround, since uf.uv iteration is wrong atm
+                    for uv in uf.uv:
+                        if i == -1:
+                            file.write('%.6f,%.6f' % uv[:])
+                            i = 0
+                        else:
+                            if i == 7:
+                                file.write('\n\t\t\t ')
+                                i = 0
+                            file.write(',%.6f,%.6f' % uv[:])
+                        i += 1
+                        ii += 1  # One more UV
+
+                file.write('\n\t\t\tUVIndex: ')
+                i = -1
+                for j in range(ii):
+                    if i == -1:
+                        file.write('%i' % j)
+                        i = 0
+                    else:
+                        if i == 55:
+                            file.write('\n\t\t\t\t')
+                            i = 0
+                        file.write(',%i' % j)
+                    i += 1
+
+                file.write('\n\t\t}')
+
+                if do_textures:
+                    file.write('\n\t\tLayerElementTexture: %i {' % uvindex)
+                    file.write('\n\t\t\tVersion: 101')
+                    file.write('\n\t\t\tName: "%s"' % uvlayer.name)
+
+                    if len(my_mesh.blenTextures) == 1:
+                        file.write('\n\t\t\tMappingInformationType: "AllSame"')
+                    else:
+                        file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
+
+                    file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
+                    file.write('\n\t\t\tBlendMode: "Translucent"')
+                    file.write('\n\t\t\tTextureAlpha: 1')
+                    file.write('\n\t\t\tTextureId: ')
+
+                    if len(my_mesh.blenTextures) == 1:
+                        file.write('0')
+                    else:
+                        texture_mapping_local = {None: -1}
+
+                        i = 0  # the dummy (None) texture is already mapped to -1 above
+                        for tex in my_mesh.blenTextures:
+                            if tex:  # None is set above
+                                texture_mapping_local[tex] = i
+                                i += 1
+
+                        i = -1
+                        for f in uvlayer.data:
+                            img_key = f.image
+
+                            if i == -1:
+                                i = 0
+                                file.write('%s' % texture_mapping_local[img_key])
+                            else:
+                                if i == 55:
+                                    file.write('\n\t\t\t ')
+                                    i = 0
+
+                                file.write(',%s' % texture_mapping_local[img_key])
+                            i += 1
+
+                else:
+                    file.write('''
+        LayerElementTexture: 0 {
+            Version: 101
+            Name: ""
+            MappingInformationType: "NoMappingInformation"
+            ReferenceInformationType: "IndexToDirect"
+            BlendMode: "Translucent"
+            TextureAlpha: 1
+            TextureId: ''')
+                file.write('\n\t\t}')
+
+        # Done with UV/textures.
+        if do_materials:
+            file.write('\n\t\tLayerElementMaterial: 0 {')
+            file.write('\n\t\t\tVersion: 101')
+            file.write('\n\t\t\tName: ""')
+
+            if len(my_mesh.blenMaterials) == 1:
+                file.write('\n\t\t\tMappingInformationType: "AllSame"')
+            else:
+                file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
+
+            file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
+            file.write('\n\t\t\tMaterials: ')
+
+            if len(my_mesh.blenMaterials) == 1:
+                file.write('0')
+            else:
+                # Build a material mapping for this
+                material_mapping_local = {}  # local-mat & tex : global index.
+
+                for j, mat_tex_pair in enumerate(my_mesh.blenMaterials):
+                    material_mapping_local[mat_tex_pair] = j
+
+                len_material_mapping_local = len(material_mapping_local)
+
+                mats = my_mesh.blenMaterialList
+
+                if me.uv_textures.active:
+                    uv_faces = me.uv_textures.active.data
+                else:
+                    uv_faces = [None] * len(me_faces)
+
+                i = -1
+                for f, uf in zip(me_faces, uv_faces):
+# 				for f in me_faces:
+                    try:
+                        mat = mats[f.material_index]
+                    except IndexError:
+                        mat = None
+
+                    if do_uvs:
+                        tex = uf.image  # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
+                    else:
+                        tex = None
+
+                    if i == -1:
+                        i = 0
+                        file.write('%s' % (material_mapping_local[mat, tex]))  # None for mat or tex is ok
+                    else:
+                        if i == 55:
+                            file.write('\n\t\t\t\t')
+                            i = 0
+
+                        file.write(',%s' % (material_mapping_local[mat, tex]))
+                    i += 1
+
+            file.write('\n\t\t}')
+
+        file.write('''
+        Layer: 0 {
+            Version: 100
+            LayerElement:  {
+                Type: "LayerElementNormal"
+                TypedIndex: 0
+            }''')
+
+        if do_materials:
+            file.write('''
+            LayerElement:  {
+                Type: "LayerElementMaterial"
+                TypedIndex: 0
+            }''')
+
+        # Only written when textures are used
+        if do_textures:
+            file.write('''
+            LayerElement:  {
+                Type: "LayerElementTexture"
+                TypedIndex: 0
+            }''')
+
+        if me.vertex_colors:
+            file.write('''
+            LayerElement:  {
+                Type: "LayerElementColor"
+                TypedIndex: 0
+            }''')
+
+        if do_uvs:  # same as me.faceUV
+            file.write('''
+            LayerElement:  {
+                Type: "LayerElementUV"
+                TypedIndex: 0
+            }''')
+
+        file.write('\n\t\t}')
+
+        if len(uvlayers) > 1:
+            for i in range(1, len(uvlayers)):
+
+                file.write('\n\t\tLayer: %i {' % i)
+                file.write('\n\t\t\tVersion: 100')
+
+                file.write('''
+            LayerElement:  {
+                Type: "LayerElementUV"''')
+
+                file.write('\n\t\t\t\tTypedIndex: %i' % i)
+                file.write('\n\t\t\t}')
+
+                if do_textures:
+
+                    file.write('''
+            LayerElement:  {
+                Type: "LayerElementTexture"''')
+
+                    file.write('\n\t\t\t\tTypedIndex: %i' % i)
+                    file.write('\n\t\t\t}')
+
+                file.write('\n\t\t}')
+
+        if len(collayers) > 1:
+            # Take into account any UV layers
+            layer_offset = 0
+            if uvlayers:
+                layer_offset = len(uvlayers) - 1
+
+            for i in range(layer_offset, len(collayers) + layer_offset):
+                file.write('\n\t\tLayer: %i {' % i)
+                file.write('\n\t\t\tVersion: 100')
+
+                file.write('''
+            LayerElement:  {
+                Type: "LayerElementColor"''')
+
+                file.write('\n\t\t\t\tTypedIndex: %i' % i)
+                file.write('\n\t\t\t}')
+                file.write('\n\t\t}')
+        file.write('\n\t}')
+
+    def write_group(name):
+        file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name)
+
+        file.write('''
+        Properties60:  {
+            Property: "MultiLayer", "bool", "",0
+            Property: "Pickable", "bool", "",1
+            Property: "Transformable", "bool", "",1
+            Property: "Show", "bool", "",1
+        }
+        MultiLayer: 0
+    }''')
+
+    # add meshes here to clear because they are not used anywhere.
+    meshes_to_clear = []
+
+    ob_meshes = []
+    ob_lights = []
+    ob_cameras = []
+    # in fbx we export bones as children of the mesh;
+    # armatures that are not part of a mesh will be added to ob_arms
+    ob_bones = []
+    ob_arms = []
+    ob_null = []  # empties
+
+    # List of types that have blender objects (not bones)
+    ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null]
+
+    groups = []  # blender groups, only add ones that have objects in the selection
+    materials = {}  # (mat, image) keys, should be a set()
+    textures = {}  # should be a set()
+
+    tmp_ob_type = ob_type = None  # in case no objects are exported, so as not to raise an error
+
+    # if EXP_OBS_SELECTED is false, use the scene's objects
+    if not batch_objects:
+        if EXP_OBS_SELECTED:
+            tmp_objects = context.selected_objects
+        else:
+            tmp_objects = scene.objects
+    else:
+        tmp_objects = batch_objects
+
+    if EXP_ARMATURE:
+        # This is needed so applying modifiers doesn't apply the armature deformation; it's also needed
+        # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes.
+        # Set every armature to its rest pose, backing up the original values so we don't mess up the scene.
+        ob_arms_orig_rest = [arm.pose_position for arm in bpy.data.armatures]
+
+        for arm in bpy.data.armatures:
+            arm.pose_position = 'REST'
+
+        if ob_arms_orig_rest:
+            for ob_base in bpy.data.objects:
+                if ob_base.type == 'ARMATURE':
+                    ob_base.update()
+
+            # This causes the makeDisplayList command to affect the mesh
+            scene.frame_set(scene.frame_current)
+
+    for ob_base in tmp_objects:
+
+        # ignore dupli children
+        if ob_base.parent and ob_base.parent.dupli_type != 'NONE':
+            continue
+
+        obs = [(ob_base, ob_base.matrix_world)]
+        if ob_base.dupli_type != 'NONE':
+            ob_base.create_dupli_list(scene)
+            obs = [(dob.object, dob.matrix) for dob in ob_base.dupli_list]
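+            # duplis are expanded so each (object, world matrix) pair is
+            # exported as a model of its own; the dupli list is freed below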
+
+        for ob, mtx in obs:
+# 		for ob, mtx in BPyObject.getDerivedObjects(ob_base):
+            tmp_ob_type = ob.type
+            if tmp_ob_type == 'CAMERA':
+                if EXP_CAMERA:
+                    ob_cameras.append(my_object_generic(ob, mtx))
+            elif tmp_ob_type == 'LAMP':
+                if EXP_LAMP:
+                    ob_lights.append(my_object_generic(ob, mtx))
+            elif tmp_ob_type == 'ARMATURE':
+                if EXP_ARMATURE:
+                    # TODO - armatures don't work in dupligroups!
+                    if ob not in ob_arms:
+                        ob_arms.append(ob)
+                    # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)"
+            elif tmp_ob_type == 'EMPTY':
+                if EXP_EMPTY:
+                    ob_null.append(my_object_generic(ob, mtx))
+            elif EXP_MESH:
+                origData = True
+                if tmp_ob_type != 'MESH':
+                    try:
+                        me = ob.create_mesh(scene, True, 'PREVIEW')
+                    except:
+                        me = None
+
+                    if me:
+                        meshes_to_clear.append(me)
+                        mats = me.materials
+                        origData = False
+                else:
+                    # Mesh Type!
+                    if EXP_MESH_APPLY_MOD:
+                        me = ob.create_mesh(scene, True, 'PREVIEW')
+
+                        # print ob, me, me.getVertGroupNames()
+                        meshes_to_clear.append(me)
+                        origData = False
+                        mats = me.materials
+                    else:
+                        me = ob.data
+                        mats = me.materials
+
+# 						# Support object colors
+# 						tmp_colbits = ob.colbits
+# 						if tmp_colbits:
+# 							tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too.
+# 							for i in xrange(16):
+# 								if tmp_colbits & (1<<i):
+# 									mats[i] = tmp_ob_mats[i]
+# 							del tmp_ob_mats
+# 						del tmp_colbits
+
+                if me:
+# 					# This WILL modify meshes in blender if EXP_MESH_APPLY_MOD is disabled.
+# 					# so strictly this is bad. but only in rare cases would it have negative results
+# 					# say with dupliverts the objects would rotate a bit differently
+# 					if EXP_MESH_HQ_NORMALS:
+# 						BPyMesh.meshCalcNormals(me) # high quality normals nice for realtime engines.
+
+                    texture_mapping_local = {}
+                    material_mapping_local = {}
+                    if me.uv_textures:
+                        for uvlayer in me.uv_textures:
+                            for f, uf in zip(me.faces, uvlayer.data):
+                                tex = uf.image
+                                textures[tex] = texture_mapping_local[tex] = None
+
+                                try:
+                                    mat = mats[f.material_index]
+                                except IndexError:
+                                    mat = None
+
+                                materials[mat, tex] = material_mapping_local[mat, tex] = None  # should use sets, wait for blender 2.5
+
+                    else:
+                        for mat in mats:
+                            # 2.44 use mat.lib too for uniqueness
+                            materials[mat, None] = material_mapping_local[mat, None] = None
+                        if not mats:
+                            # a plain for/else would always run; add the default only when there are no materials
+                            materials[None, None] = None
+
+                    if EXP_ARMATURE:
+                        armob = ob.find_armature()
+                        blenParentBoneName = None
+
+                        # parent bone - special case
+                        if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \
+                                ob.parent_type == 'BONE':
+                            armob = ob.parent
+                            blenParentBoneName = ob.parent_bone
+
+                        if armob and armob not in ob_arms:
+                            ob_arms.append(armob)
+
+                        # Warning for scaled, mesh objects with armatures
+                        if abs(ob.scale[0] - 1.0) > 0.05 or abs(ob.scale[1] - 1.0) > 0.05 or abs(ob.scale[2] - 1.0) > 0.05:
+                            operator.report({'WARNING'}, "Object '%s' has a scale of (%.3f, %.3f, %.3f), armature deformation will not work as expected! Apply Scale to fix." % ((ob.name,) + tuple(ob.scale)))
+
+                    else:
+                        blenParentBoneName = armob = None
+
+                    my_mesh = my_object_generic(ob, mtx)
+                    my_mesh.blenData = me
+                    my_mesh.origData = origData
+                    my_mesh.blenMaterials = list(material_mapping_local.keys())
+                    my_mesh.blenMaterialList = mats
+                    my_mesh.blenTextures = list(texture_mapping_local.keys())
+
+                    # if only 1 null texture then empty the list
+                    if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] is None:
+                        my_mesh.blenTextures = []
+
+                    my_mesh.fbxArm = armob  # replace with my_object_generic armature instance later
+                    my_mesh.fbxBoneParent = blenParentBoneName  # replace with my_bone instance later
+
+                    ob_meshes.append(my_mesh)
+
+        # don't forget to free the dupli_list
+        if ob_base.dupli_list:
+            ob_base.free_dupli_list()
+
+    if EXP_ARMATURE:
+        # now we have the meshes, restore the rest arm position
+        for i, arm in enumerate(bpy.data.armatures):
+            arm.pose_position = ob_arms_orig_rest[i]
+
+        if ob_arms_orig_rest:
+            for ob_base in bpy.data.objects:
+                if ob_base.type == 'ARMATURE':
+                    ob_base.update()
+            # This causes the makeDisplayList command to affect the mesh
+            scene.frame_set(scene.frame_current)
+
+    del tmp_ob_type, tmp_objects
+
+    # now we have collected all armatures, add bones
+    for i, ob in enumerate(ob_arms):
+
+        ob_arms[i] = my_arm = my_object_generic(ob)
+
+        my_arm.fbxBones = []
+        my_arm.blenData = ob.data
+        if ob.animation_data:
+            my_arm.blenAction = ob.animation_data.action
+        else:
+            my_arm.blenAction = None
+# 		my_arm.blenAction =		ob.action
+        my_arm.blenActionList = []
+
+        # fbxName, blenderObject, my_bones, blenderActions
+        #ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, [])
+
+        for bone in my_arm.blenData.bones:
+            my_bone = my_bone_class(bone, my_arm)
+            my_arm.fbxBones.append(my_bone)
+            ob_bones.append(my_bone)
+
+    # add the meshes to the bones and replace each mesh's armature with our own armature class
+    #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+    for my_mesh in ob_meshes:
+        # Replace
+        # ...this could be sped up with a dictionary mapping, but it's unlikely
+        # to ever be a bottleneck - (would need 100+ meshes using armatures)
+        if my_mesh.fbxArm:
+            for my_arm in ob_arms:
+                if my_arm.blenObject == my_mesh.fbxArm:
+                    my_mesh.fbxArm = my_arm
+                    break
+
+        for my_bone in ob_bones:
+
+            # The mesh uses this bone's armature!
+            if my_bone.fbxArm == my_mesh.fbxArm:
+                if my_bone.blenBone.use_deform:
+                    my_bone.blenMeshes[my_mesh.fbxName] = my_mesh.blenData
+
+                # parent bone: replace bone names with our class instances
+                # my_mesh.fbxBoneParent is None or a blender bone name initially, replacing if the names match.
+                if my_mesh.fbxBoneParent == my_bone.blenName:
+                    my_mesh.fbxBoneParent = my_bone
+
+    bone_deformer_count = 0  # count how many bones deform a mesh
+    my_bone_blenParent = None
+    for my_bone in ob_bones:
+        my_bone_blenParent = my_bone.blenBone.parent
+        if my_bone_blenParent:
+            for my_bone_parent in ob_bones:
+                # Note 2.45rc2 you can compare bones normally
+                if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm:
+                    my_bone.parent = my_bone_parent
+                    break
+
+        # Not used at the moment
+        # my_bone.calcRestMatrixLocal()
+        bone_deformer_count += len(my_bone.blenMeshes)
+
+    del my_bone_blenParent
+
+    # Build blenObject -> fbxObject mapping
+    # this is needed for groups as well as fbxParenting
+    bpy.data.objects.tag(False)
+
+    # using a list of object names for tagging (Arystan)
+
+    tmp_obmapping = {}
+    for ob_generic in ob_all_typegroups:
+        for ob_base in ob_generic:
+            ob_base.blenObject.tag = True
+            tmp_obmapping[ob_base.blenObject] = ob_base
+
+    # Build Groups from objects we export
+    for blenGroup in bpy.data.groups:
+        fbxGroupName = None
+        for ob in blenGroup.objects:
+            if ob.tag:
+                if fbxGroupName is None:
+                    fbxGroupName = sane_groupname(blenGroup)
+                    groups.append((fbxGroupName, blenGroup))
+
+                tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName)  # also adds to the objects fbxGroupNames
+
+    groups.sort()  # not really needed
+
+    # Assign parents using this mapping
+    for ob_generic in ob_all_typegroups:
+        for my_ob in ob_generic:
+            parent = my_ob.blenObject.parent
+            if parent and parent.tag:  # does it exist and is it in the mapping
+                my_ob.fbxParent = tmp_obmapping[parent]
+
+    del tmp_obmapping
+    # Finished finding groups we use
+
+    materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()]
+    textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex]
+    materials.sort()  # sort by name
+    textures.sort()
+
+    camera_count = 8
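+    # 8 = the seven fixed 'Producer' cameras plus the Camera Switcher (written below and listed in Relations)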
+    file.write('''
+
+; Object definitions
+;------------------------------------------------------------------
+
+Definitions:  {
+    Version: 100
+    Count: %i''' % (\
+        1 + 1 + camera_count + \
+        len(ob_meshes) + \
+        len(ob_lights) + \
+        len(ob_cameras) + \
+        len(ob_arms) + \
+        len(ob_null) + \
+        len(ob_bones) + \
+        bone_deformer_count + \
+        len(materials) + \
+        (len(textures) * 2)))  # the extra 1 + 1 are the root model and the global settings
+
+    del bone_deformer_count
+
+    file.write('''
+    ObjectType: "Model" {
+        Count: %i
+    }''' % (\
+        1 + camera_count + \
+        len(ob_meshes) + \
+        len(ob_lights) + \
+        len(ob_cameras) + \
+        len(ob_arms) + \
+        len(ob_null) + \
+        len(ob_bones)))  # add 1 for the root model
+
+    file.write('''
+    ObjectType: "Geometry" {
+        Count: %i
+    }''' % len(ob_meshes))
+
+    if materials:
+        file.write('''
+    ObjectType: "Material" {
+        Count: %i
+    }''' % len(materials))
+
+    if textures:
+        file.write('''
+    ObjectType: "Texture" {
+        Count: %i
+    }''' % len(textures))
+        file.write('''
+    ObjectType: "Video" {
+        Count: %i
+    }''' % len(textures))
+
+    tmp = 0
+    # Add deformer nodes
+    for my_mesh in ob_meshes:
+        if my_mesh.fbxArm:
+            tmp += 1
+
+    # Add subdeformers
+    for my_bone in ob_bones:
+        tmp += len(my_bone.blenMeshes)
+
+    if tmp:
+        file.write('''
+    ObjectType: "Deformer" {
+        Count: %i
+    }''' % tmp)
+    del tmp
+
+    # We could possibly avoid writing this, but for now just write it.
+
+    file.write('''
+    ObjectType: "Pose" {
+        Count: 1
+    }''')
+
+    if groups:
+        file.write('''
+    ObjectType: "GroupSelection" {
+        Count: %i
+    }''' % len(groups))
+
+    file.write('''
+    ObjectType: "GlobalSettings" {
+        Count: 1
+    }
+}''')
+
+    file.write('''
+
+; Object properties
+;------------------------------------------------------------------
+
+Objects:  {''')
+
+    # To comply with other FBX files
+    write_camera_switch()
+
+    # Write the null object
+    write_null(None, 'blend_root')  # , GLOBAL_MATRIX)
+
+    for my_null in ob_null:
+        write_null(my_null)
+
+    for my_arm in ob_arms:
+        write_null(my_arm)
+
+    for my_cam in ob_cameras:
+        write_camera(my_cam)
+
+    for my_light in ob_lights:
+        write_light(my_light)
+
+    for my_mesh in ob_meshes:
+        write_mesh(my_mesh)
+
+    #for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        write_bone(my_bone)
+
+    write_camera_default()
+
+    for matname, (mat, tex) in materials:
+        write_material(matname, mat)  # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard)
+
+    # each texture uses a video, odd
+    for texname, tex in textures:
+        write_video(texname, tex)
+    i = 0
+    for texname, tex in textures:
+        write_texture(texname, tex, i)
+        i += 1
+
+    for groupname, group in groups:
+        write_group(groupname)
+
+    # NOTE - c4d and motionbuilder don't need normalized weights, but deep-exploration 5 does (and possibly max).
+
+    # Write armature modifiers
+    # TODO - add another MODEL? - because of this skin definition.
+    for my_mesh in ob_meshes:
+        if my_mesh.fbxArm:
+            write_deformer_skin(my_mesh.fbxName)
+
+            # Get normalized weights for temporary use
+            if my_mesh.fbxBoneParent:
+                weights = None
+            else:
+                weights = meshNormalizedWeights(my_mesh.blenObject, my_mesh.blenData)
+
+            #for bonename, bone, obname, bone_mesh, armob in ob_bones:
+            for my_bone in ob_bones:
+                if my_mesh.blenData in iter(my_bone.blenMeshes.values()):
+                    write_sub_deformer_skin(my_mesh, my_bone, weights)
+
+    # Writing poses is really weird; they're only needed when an armature and mesh are used
+    # together, each by itself doesn't need pose data. For now only pose meshes and bones.
+
+    file.write('''
+    Pose: "Pose::BIND_POSES", "BindPose" {
+        Type: "BindPose"
+        Version: 100
+        Properties60:  {
+        }
+        NbPoseNodes: ''')
+    file.write(str(len(pose_items)))
+
+    for fbxName, matrix in pose_items:
+        file.write('\n\t\tPoseNode:  {')
+        file.write('\n\t\t\tNode: "Model::%s"' % fbxName)
+        file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix if matrix else Matrix()))
+        file.write('\n\t\t}')
+
+    file.write('\n\t}')
+
+    # Finish Writing Objects
+    # Write global settings
+    file.write('''
+    GlobalSettings:  {
+        Version: 1000
+        Properties60:  {
+            Property: "UpAxis", "int", "",1
+            Property: "UpAxisSign", "int", "",1
+            Property: "FrontAxis", "int", "",2
+            Property: "FrontAxisSign", "int", "",1
+            Property: "CoordAxis", "int", "",0
+            Property: "CoordAxisSign", "int", "",1
+            Property: "UnitScaleFactor", "double", "",100
+        }
+    }
+''')
+    file.write('}')
+
+    file.write('''
+
+; Object relations
+;------------------------------------------------------------------
+
+Relations:  {''')
+
+    file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}')
+
+    for my_null in ob_null:
+        file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName)
+
+    for my_arm in ob_arms:
+        file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName)
+
+    for my_mesh in ob_meshes:
+        file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName)
+
+    # TODO - limbs can have the same name for multiple armatures, should prefix.
+    #for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName)
+
+    for my_cam in ob_cameras:
+        file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName)
+
+    for my_light in ob_lights:
+        file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName)
+
+    file.write('''
+    Model: "Model::Producer Perspective", "Camera" {
+    }
+    Model: "Model::Producer Top", "Camera" {
+    }
+    Model: "Model::Producer Bottom", "Camera" {
+    }
+    Model: "Model::Producer Front", "Camera" {
+    }
+    Model: "Model::Producer Back", "Camera" {
+    }
+    Model: "Model::Producer Right", "Camera" {
+    }
+    Model: "Model::Producer Left", "Camera" {
+    }
+    Model: "Model::Camera Switcher", "CameraSwitcher" {
+    }''')
+
+    for matname, (mat, tex) in materials:
+        file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname)
+
+    if textures:
+        for texname, tex in textures:
+            file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname)
+        for texname, tex in textures:
+            file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname)
+
+    # deformers - modifiers
+    for my_mesh in ob_meshes:
+        if my_mesh.fbxArm:
+            file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName)
+
+    #for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        for fbxMeshObName in my_bone.blenMeshes:  # .keys() - fbxMeshObName
+            # is this bone affecting a mesh?
+            file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName))
+
+    # This should be at the end
+    # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}')
+
+    for groupname, group in groups:
+        file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname)
+
+    file.write('\n}')
+    file.write('''
+
+; Object connections
+;------------------------------------------------------------------
+
+Connections:  {''')
+
+    # NOTE - The FBX SDK doesn't care about the order but some importers DO!
+    # for instance, defining the material->mesh connection
+    # before the mesh->blend_root crashes cinema4d
+
+    # write the fake root node
+    file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"')
+
+    for ob_generic in ob_all_typegroups:  # all blender 'Object's we support
+        for my_ob in ob_generic:
+            if my_ob.fbxParent:
+                file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName))
+            else:
+                file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName)
+
+    if materials:
+        for my_mesh in ob_meshes:
+            # Connect all materials to all objects, not good form but ok for now.
+            for mat, tex in my_mesh.blenMaterials:
+                mat_name = mat.name if mat else None
+                tex_name = tex.name if tex else None
+
+                file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName))
+
+    if textures:
+        for my_mesh in ob_meshes:
+            if my_mesh.blenTextures:
+                # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName)
+                for tex in my_mesh.blenTextures:
+                    if tex:
+                        file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName))
+
+        for texname, tex in textures:
+            file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname))
+
+    for my_mesh in ob_meshes:
+        if my_mesh.fbxArm:
+            file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName))
+
+    #for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        for fbxMeshObName in my_bone.blenMeshes:  # .keys()
+            file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName))
+
+    # limbs -> deformers
+    # for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        for fbxMeshObName in my_bone.blenMeshes:  # .keys()
+            file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName))
+
+    #for bonename, bone, obname, me, armob in ob_bones:
+    for my_bone in ob_bones:
+        # Always parent to armature now
+        if my_bone.parent:
+            file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName))
+        else:
+            # the armature object is written as an empty and all root level bones connect to it
+            file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName))
+
+    # groups
+    if groups:
+        for ob_generic in ob_all_typegroups:
+            for ob_base in ob_generic:
+                for fbxGroupName in ob_base.fbxGroupNames:
+                    file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName))
+
+    for my_arm in ob_arms:
+        file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName)
+
+    file.write('\n}')
+
+    # Needed for scene footer as well as animation
+    render = scene.render
+
+    # from the FBX sdk
+    #define KTIME_ONE_SECOND        KTime (K_LONGLONG(46186158000))
+    def fbx_time(t):
+        # 0.5 + val is the same as rounding.
+        return int(0.5 + ((t / fps) * 46186158000))
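+    # e.g. with fps == 25.0, fbx_time(25.0) == 46186158000, i.e. frame 25 lands
+    # exactly one FBX second (KTIME_ONE_SECOND) into the take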
+
+    fps = float(render.fps)
+    start = scene.frame_start
+    end = scene.frame_end
+    if end < start:
+        start, end = end, start
+
+    # keep the following line commented, otherwise we don't get the pose
+    # if start==end: ANIM_ENABLE = False
+
+    # animations for these object types
+    ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms
+
+    if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]:
+
+        frame_orig = scene.frame_current
+
+        if ANIM_OPTIMIZE:
+            ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION
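+            # e.g. ANIM_OPTIMIZE_PRECISSION == 6 gives a key-removal tolerance of roughly 1e-06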
+
+        # default action, when no actions are available
+        tmp_actions = []
+        blenActionDefault = None
+        action_lastcompat = None
+
+        # instead of tagging
+        tagged_actions = []
+
+        if ANIM_ACTION_ALL:
+# 			bpy.data.actions.tag = False
+            tmp_actions = bpy.data.actions[:]
+
+            # find which actions are compatible with the armatures
+            # blenActions is not yet initialized so do it now.
+            tmp_act_count = 0
+            for my_arm in ob_arms:
+
+                # get the default name
+                if not blenActionDefault:
+                    blenActionDefault = my_arm.blenAction
+
+                arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones])
+
+                for action in tmp_actions:
+
+                    action_chan_names = arm_bone_names.intersection(set([g.name for g in action.groups]))
+# 					action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) )
+
+                    if action_chan_names:  # at least one channel matches.
+                        my_arm.blenActionList.append(action)
+                        tagged_actions.append(action.name)
+# 						action.tag = True
+                        tmp_act_count += 1
+
+                        # in case there are no actions applied to armatures
+                        action_lastcompat = action
+
+            if tmp_act_count:
+                # unlikely to ever happen, but if no actions are applied to armatures, just use the last compatible action.
+                if not blenActionDefault:
+                    blenActionDefault = action_lastcompat
+
+        del action_lastcompat
+
+        tmp_actions.insert(0, None)  # None is the default action
+
+        file.write('''
+;Takes and animation section
+;----------------------------------------------------
+
+Takes:  {''')
+
+        if blenActionDefault:
+            file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault))
+        else:
+            file.write('\n\tCurrent: "Default Take"')
+
+        for blenAction in tmp_actions:
+            # we have tagged all actions that are used by selected armatures
+            if blenAction:
+                if blenAction.name in tagged_actions:
+# 				if blenAction.tag:
+                    print('\taction: "%s" exporting...' % blenAction.name)
+                else:
+                    print('\taction: "%s" has no armature using it, skipping' % blenAction.name)
+                    continue
+
+            if blenAction is None:
+                # Warning, this only accounts for tmp_actions being [None]
+                file.write('\n\tTake: "Default Take" {')
+                act_start = start
+                act_end = end
+            else:
+                # use existing name
+                if blenAction == blenActionDefault:  # have we already got the name
+                    file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name])
+                else:
+                    file.write('\n\tTake: "%s" {' % sane_takename(blenAction))
+
+                act_start, act_end = blenAction.frame_range
+                act_start = int(act_start)
+                act_end = int(act_end)
+
+                # Set the action active
+                for my_arm in ob_arms:
+                    if my_arm.blenObject.animation_data and blenAction in my_arm.blenActionList:
+                        my_arm.blenObject.animation_data.action = blenAction
+                        # print('\t\tSetting Action!', blenAction)
+                # scene.update(1)
+
+            file.write('\n\t\tFileName: "Default_Take.tak"')  # ??? - not sure why this is needed
+            file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start - 1), fbx_time(act_end - 1)))  # ??? - not sure why this is needed
+            file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start - 1), fbx_time(act_end - 1)))  # ??? - not sure why this is needed
+
+            file.write('''
+
+        ;Models animation
+        ;----------------------------------------------------''')
+
+            # set pose data for all bones
+            # do this here in case the action changes
+            '''
+            for my_bone in ob_bones:
+                my_bone.flushAnimData()
+            '''
+            i = act_start
+            while i <= act_end:
+                scene.frame_set(i)
+                for ob_generic in ob_anim_lists:
+                    for my_ob in ob_generic:
+                        #Blender.Window.RedrawAll()
+                        if ob_generic == ob_meshes and my_ob.fbxArm:
+                            # We can't animate armature meshes!
+                            my_ob.setPoseFrame(i, fake=True)
+                        else:
+                            my_ob.setPoseFrame(i)
+
+                i += 1
+
+            #for bonename, bone, obname, me, armob in ob_bones:
+            for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms):
+
+                for my_ob in ob_generic:
+
+                    if ob_generic == ob_meshes and my_ob.fbxArm:
+                        # do nothing,
+                        pass
+                    else:
+
+                        file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName)  # ??? - not sure why this is needed
+                        file.write('\n\t\t\tVersion: 1.1')
+                        file.write('\n\t\t\tChannel: "Transform" {')
+
+                        context_bone_anim_mats = [(my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end + 1)]
+
+                        # ----------------
+                        # ----------------
+                        for TX_LAYER, TX_CHAN in enumerate('TRS'):  # transform, rotate, scale
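+                            # TX_LAYER 0, 1, 2 maps to the LayerType 1, 2, 3 written after each channel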
+
+                            if TX_CHAN == 'T':
+                                context_bone_anim_vecs = [mtx[0].translation_part() for mtx in context_bone_anim_mats]
+                            elif TX_CHAN == 'S':
+                                context_bone_anim_vecs = [mtx[0].scale_part() for mtx in context_bone_anim_mats]
+                            elif TX_CHAN == 'R':
+                                # Was....
+                                # elif 	TX_CHAN=='R':	context_bone_anim_vecs = [mtx[1].to_euler()			for mtx in context_bone_anim_mats]
+                                #
+                                # ...but we need to use the previous euler for compatible conversion.
+                                context_bone_anim_vecs = []
+                                prev_eul = None
+                                for mtx in context_bone_anim_mats:
+                                    if prev_eul:
+                                        prev_eul = mtx[1].to_euler('XYZ', prev_eul)
+                                    else:
+                                        prev_eul = mtx[1].to_euler()
+                                    context_bone_anim_vecs.append(tuple_rad_to_deg(prev_eul))
+
+                            file.write('\n\t\t\t\t\tChannel: "%s" {' % TX_CHAN)  # T/R/S channel
+
+                            for i in range(3):
+                                # Loop on each axis of the bone
+                                file.write('\n\t\t\t\t\tChannel: "%s" {' % ('XYZ'[i]))  # axis sub-channel
+                                file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i])
+                                file.write('\n\t\t\t\t\t\tKeyVer: 4005')
+
+                                if not ANIM_OPTIMIZE:
+                                    # Just write all frames, simple but inefficient
+                                    file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start))
+                                    file.write('\n\t\t\t\t\t\tKey: ')
+                                    frame = act_start
+                                    while frame <= act_end:
+                                        if frame != act_start:
+                                            file.write(',')
+
+                                        # Curve types are 'C,n' for constant, 'L' for linear
+                                        # C,n is for bezier? - linear is best for now so we can do simple keyframe removal
+                                        file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame - 1), context_bone_anim_vecs[frame - act_start][i]))
+                                        frame += 1
+                                else:
+                                    # remove unneeded keys, j is the frame, needed when some frames are removed.
+                                    context_bone_anim_keys = [(vec[i], j) for j, vec in enumerate(context_bone_anim_vecs)]
+
+                                    # last frame to first frame, missing 1 frame on either side.
+                                    # removing in a backwards loop is faster
+                                    #for j in xrange( (act_end-act_start)-1, 0, -1 ):
+                                    # j = (act_end-act_start)-1
+                                    j = len(context_bone_anim_keys) - 2
+                                    while j > 0 and len(context_bone_anim_keys) > 2:
+                                        # print j, len(context_bone_anim_keys)
+                                        # Is this key the same as the ones next to it?
+
+                                        # co-linear horizontal...
+                                        if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j - 1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and \
+                                                abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j + 1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+
+                                            del context_bone_anim_keys[j]
+
+                                        else:
+                                            frame_range = float(context_bone_anim_keys[j + 1][1] - context_bone_anim_keys[j - 1][1])
+                                            frame_range_fac1 = (context_bone_anim_keys[j + 1][1] - context_bone_anim_keys[j][1]) / frame_range
+                                            frame_range_fac2 = 1.0 - frame_range_fac1
+
+                                            if abs(((context_bone_anim_keys[j - 1][0] * frame_range_fac1 + context_bone_anim_keys[j + 1][0] * frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+                                                del context_bone_anim_keys[j]
+                                            else:
+                                                j -= 1
+
+                                        # keep the index below the list length
+                                        if j > len(context_bone_anim_keys) - 2:
+                                            j = len(context_bone_anim_keys) - 2
+
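+                                    # e.g. keys [(1.0, 0), (2.0, 1), (3.0, 2)]: the middle key is exactly the
+                                    # linear interpolation of its neighbours (0.5 * 1.0 + 0.5 * 3.0 == 2.0),
+                                    # so the loop above drops it and only the two endpoint keys get written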
+                                    if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]:
+
+                                        # This axis has no motion, it's okay to skip KeyCount and Keys in this case
+                                        # pass
+
+                                        # better write one, otherwise we lose poses with no animation
+                                        file.write('\n\t\t\t\t\t\tKeyCount: 1')
+                                        file.write('\n\t\t\t\t\t\tKey: ')
+                                        file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(start), context_bone_anim_keys[0][0]))
+                                    else:
+                                        # We only need to write these if there is at least one
+                                        file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys))
+                                        file.write('\n\t\t\t\t\t\tKey: ')
+                                        for val, frame in context_bone_anim_keys:
+                                            if frame != context_bone_anim_keys[0][1]:  # not the first
+                                                file.write(',')
+                                            # frame is already one less than blender's frame
+                                            file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val))
+
+                                if i == 0:
+                                    file.write('\n\t\t\t\t\t\tColor: 1,0,0')
+                                elif i == 1:
+                                    file.write('\n\t\t\t\t\t\tColor: 0,1,0')
+                                elif i == 2:
+                                    file.write('\n\t\t\t\t\t\tColor: 0,0,1')
+
+                                file.write('\n\t\t\t\t\t}')
+                            file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER + 1))
+                            file.write('\n\t\t\t\t}')
+
+                        # ---------------
+
+                        file.write('\n\t\t\t}')
+                        file.write('\n\t\t}')
+
+            # end the take
+            file.write('\n\t}')
+
+            # end action loop. set original actions
+            # do this after every loop in case actions affect each other.
+            for my_arm in ob_arms:
+                if my_arm.blenObject.animation_data:
+                    my_arm.blenObject.animation_data.action = my_arm.blenAction
+
+        file.write('\n}')
+
+        scene.frame_set(frame_orig)
+
+    else:
+        # no animation
+        file.write('\n;Takes and animation section')
+        file.write('\n;----------------------------------------------------')
+        file.write('\n')
+        file.write('\nTakes:  {')
+        file.write('\n\tCurrent: ""')
+        file.write('\n}')
+
+    # write meshes animation
+    #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+
+    # Clear mesh data Only when writing with modifiers applied
+    for me in meshes_to_clear:
+        bpy.data.meshes.remove(me)
+
+    # --------------------------- Footer
+    if world:
+        m = world.mist_settings
+        has_mist = m.use_mist
+        mist_intense = m.intensity
+        mist_start = m.start
+        mist_end = m.depth
+        mist_height = m.height
+        world_hor = world.horizon_color
+    else:
+        has_mist = mist_intense = mist_start = mist_end = mist_height = 0
+        world_hor = 0, 0, 0
+
+    file.write('\n;Version 5 settings')
+    file.write('\n;------------------------------------------------------------------')
+    file.write('\n')
+    file.write('\nVersion5:  {')
+    file.write('\n\tAmbientRenderSettings:  {')
+    file.write('\n\t\tVersion: 101')
+    file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb))
+    file.write('\n\t}')
+    file.write('\n\tFogOptions:  {')
+    file.write('\n\t\tFlogEnable: %i' % has_mist)
+    file.write('\n\t\tFogMode: 0')
+    file.write('\n\t\tFogDensity: %.3f' % mist_intense)
+    file.write('\n\t\tFogStart: %.3f' % mist_start)
+    file.write('\n\t\tFogEnd: %.3f' % mist_end)
+    file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor))
+    file.write('\n\t}')
+    file.write('\n\tSettings:  {')
+    file.write('\n\t\tFrameRate: "%i"' % int(fps))
+    file.write('\n\t\tTimeFormat: 1')
+    file.write('\n\t\tSnapOnFrames: 0')
+    file.write('\n\t\tReferenceTimeIndex: -1')
+    file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start - 1))
+    file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end - 1))
+    file.write('\n\t}')
+    file.write('\n\tRendererSetting:  {')
+    file.write('\n\t\tDefaultCamera: "Producer Perspective"')
+    file.write('\n\t\tDefaultViewingMode: 0')
+    file.write('\n\t}')
+    file.write('\n}')
+    file.write('\n')
+
+    # XXX, shouldn't be global!
+    sane_name_mapping_ob.clear()
+    sane_name_mapping_mat.clear()
+    sane_name_mapping_tex.clear()
+    sane_name_mapping_take.clear()
+    sane_name_mapping_group.clear()
+
+    ob_arms[:] = []
+    ob_bones[:] = []
+    ob_cameras[:] = []
+    ob_lights[:] = []
+    ob_meshes[:] = []
+    ob_null[:] = []
+
+    # copy images if enabled
+# 	if EXP_IMAGE_COPY:
+# # 		copy_images( basepath,  [ tex[1] for tex in textures if tex[1] != None ])
+# 		bpy.util.copy_images( [ tex[1] for tex in textures if tex[1] != None ], basepath)
+    file.close()
+
+    print('export finished in %.4f sec.' % (time.clock() - start_time))
+    return {'FINISHED'}
+
+
+# NOTES (all line numbers correspond to the original export_fbx.py under release/scripts)
+# - Draw.PupMenu alternative in 2.5? temporarily replaced PupMenu with print
+# - get rid of bpy.path.clean_name somehow
+# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565
+# + get rid of BPyObject_getObjectArmature, move it in RNA?
+# - BATCH_ENABLE and BATCH_GROUP options: line 327
+# - implement all BPyMesh_* used here with RNA
+# - getDerivedObjects is not fully replicated with .dupli* funcs
+# - talk to Campbell, this code won't work? lines 1867-1875
+# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893
+# - no hq normals: 1900-1901
+
+# TODO
+
+# - bpy.data.remove_scene: line 366
+# - bpy.sys.time move to bpy.sys.util?
+# - new scene creation, activation: lines 327-342, 368
+# - uses bpy.path.abspath, *.relpath - replace at least relpath
diff --git a/io_scene_obj/__init__.py b/io_scene_obj/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc7422d376ff92f9b6684133ead175092923b844
--- /dev/null
+++ b/io_scene_obj/__init__.py
@@ -0,0 +1,143 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "Wavefront OBJ format",
+    "author": "Campbell Barton",
+    "location": "File > Import-Export",
+    "description": "Import-Export X3D, Import OBJ mesh, UV's, materials and textures",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/Wavefront_OBJ",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "import_obj" in locals():
+        imp.reload(import_obj)
+    if "export_obj" in locals():
+        imp.reload(export_obj)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ExportHelper, ImportHelper
+
+
+class ImportOBJ(bpy.types.Operator, ImportHelper):
+    '''Load a Wavefront OBJ File'''
+    bl_idname = "import_scene.obj"
+    bl_label = "Import OBJ"
+
+    filename_ext = ".obj"
+    filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})
+
+    CREATE_SMOOTH_GROUPS = BoolProperty(name="Smooth Groups", description="Surround smooth groups by sharp edges", default=True)
+    CREATE_FGONS = BoolProperty(name="NGons as FGons", description="Import faces with more than 4 verts as fgons", default=True)
+    CREATE_EDGES = BoolProperty(name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default=True)
+    SPLIT_OBJECTS = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
+    SPLIT_GROUPS = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
+    # old comment: only used for user feedback
+    # disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj
+    # KEEP_VERT_ORDER = BoolProperty(name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True)
+    ROTATE_X90 = BoolProperty(name="-X90", description="Rotate X 90.", default=True)
+    CLAMP_SIZE = FloatProperty(name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0)
+    POLYGROUPS = BoolProperty(name="Poly Groups", description="Import OBJ groups as vertex groups.", default=True)
+    IMAGE_SEARCH = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
+
+    def execute(self, context):
+        # print("Selected: " + context.active_object.name)
+        from . import import_obj
+        return import_obj.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class ExportOBJ(bpy.types.Operator, ExportHelper):
+    '''Save a Wavefront OBJ File'''
+
+    bl_idname = "export_scene.obj"
+    bl_label = 'Export OBJ'
+    bl_options = {'PRESET'}
+
+    filename_ext = ".obj"
+    filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})
+
+    # List of operator properties, the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    # context group
+    use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)
+    use_all_scenes = BoolProperty(name="All Scenes", description="", default=False)
+    use_animation = BoolProperty(name="Animation", description="", default=False)
+
+    # object group
+    use_modifiers = BoolProperty(name="Apply Modifiers", description="Apply modifiers (preview resolution)", default=True)
+    use_rotate_x90 = BoolProperty(name="Rotate X90", description="", default=True)
+
+    # extra data group
+    use_edges = BoolProperty(name="Edges", description="", default=True)
+    use_normals = BoolProperty(name="Normals", description="", default=False)
+    use_hq_normals = BoolProperty(name="High Quality Normals", description="", default=True)
+    use_uvs = BoolProperty(name="UVs", description="", default=True)
+    use_materials = BoolProperty(name="Materials", description="", default=True)
+    copy_images = BoolProperty(name="Copy Images", description="", default=False)
+    use_triangles = BoolProperty(name="Triangulate", description="", default=False)
+    use_vertex_groups = BoolProperty(name="Polygroups", description="", default=False)
+    use_nurbs = BoolProperty(name="Nurbs", description="", default=False)
+
+    # grouping group
+    use_blen_objects = BoolProperty(name="Objects as OBJ Objects", description="", default=True)
+    group_by_object = BoolProperty(name="Objects as OBJ Groups", description="", default=False)
+    group_by_material = BoolProperty(name="Material Groups", description="", default=False)
+    keep_vertex_order = BoolProperty(name="Keep Vertex Order", description="", default=False)
+
+    def execute(self, context):
+        from . import export_obj
+        return export_obj.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+
+def menu_func_import(self, context):
+    self.layout.operator(ImportOBJ.bl_idname, text="Wavefront (.obj)")
+
+
+def menu_func_export(self, context):
+    self.layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj)")
+
+
+def register():
+    bpy.types.INFO_MT_file_import.append(menu_func_import)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_import.remove(menu_func_import)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+
+# CONVERSION ISSUES
+# - matrix problem
+# - duplis - only tested dupliverts
+# - all scenes export
+# + normals calculation
+
+if __name__ == "__main__":
+    register()
diff --git a/io_scene_obj/export_obj.py b/io_scene_obj/export_obj.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b80b52f21df499b0fb919951105796859a32f5d
--- /dev/null
+++ b/io_scene_obj/export_obj.py
@@ -0,0 +1,836 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import os
+import time
+import shutil
+
+import bpy
+import mathutils
+
+def fixName(name):
+    if name is None:
+        return 'None'
+    else:
+        return name.replace(' ', '_')
+
+def write_mtl(scene, filepath, copy_images, mtl_dict):
+
+    world = scene.world
+    if world:
+        worldAmb = world.ambient_color[:]
+    else:
+        worldAmb = 0.0, 0.0, 0.0
+
+    dest_dir = os.path.dirname(filepath)
+
+    def copy_image(image):
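+        # Decide which path the MTL will reference:
+        # - copy_images: copy the file beside the export and reference it by basename
+        # - image already under dest_dir: reference it by a relative path
+        # - otherwise: reference it by its absolute (normalized) path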
+        fn = bpy.path.abspath(image.filepath)
+        fn = os.path.normpath(fn)
+        fn_strip = os.path.basename(fn)
+
+        if copy_images:
+            rel = fn_strip
+            fn_abs_dest = os.path.join(dest_dir, fn_strip)
+            if not os.path.exists(fn_abs_dest):
+                shutil.copy(fn, fn_abs_dest)
+        elif bpy.path.is_subdir(fn, dest_dir):
+            rel = os.path.relpath(fn, dest_dir)
+        else:
+            rel = fn
+
+        return rel
+
+
+    file = open(filepath, "w", encoding='utf8')
+    file.write('# Blender MTL File: %r\n' % os.path.basename(bpy.data.filepath))
+    file.write('# Material Count: %i\n' % len(mtl_dict))
+    # Write material/image combinations we have used.
+    for key, (mtl_mat_name, mat, img) in mtl_dict.items():
+
+        # Get the Blender data for the material and the image.
+        # Having an image named None will cause a bug, don't do it :)
+
+        file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
+
+        if mat:
+            file.write('Ns %.6f\n' % ((mat.specular_hardness-1) * 1.9607843137254901)) # Hardness, convert blenders 1-511 to MTL's 0-1000
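+            # e.g. hardness 1 -> Ns 0.000000, hardness 511 -> Ns 1000.000000 (510 * 1.9607843137254901)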
+            file.write('Ka %.6f %.6f %.6f\n' % tuple(c * mat.ambient for c in worldAmb)) # Ambient, world ambient scaled by material ambient
+            file.write('Kd %.6f %.6f %.6f\n' % tuple(c * mat.diffuse_intensity for c in mat.diffuse_color)) # Diffuse
+            file.write('Ks %.6f %.6f %.6f\n' % tuple(c * mat.specular_intensity for c in mat.specular_color)) # Specular
+            if hasattr(mat, "ior"):
+                file.write('Ni %.6f\n' % mat.ior) # Refraction index
+            else:
+                file.write('Ni %.6f\n' % 1.0)
+            file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
+
+            # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
+            if mat.use_shadeless:
+                file.write('illum 0\n') # ignore lighting
+            elif mat.specular_intensity == 0:
+                file.write('illum 1\n') # no specular.
+            else:
+                file.write('illum 2\n') # light normally
+
+        else:
+            # write a dummy material here?
+            file.write('Ns 0\n')
+            file.write('Ka %.6f %.6f %.6f\n' % tuple(c for c in worldAmb)) # Ambient, world ambient colour
+            file.write('Kd 0.8 0.8 0.8\n')
+            file.write('Ks 0.8 0.8 0.8\n')
+            file.write('d 1\n') # No alpha
+            file.write('illum 2\n') # light normally
+
+        # Write images!
+        if img:  # We have an image on the face!
+            # write relative image path
+            rel = copy_image(img)
+            file.write('map_Kd %s\n' % rel) # Diffuse mapping image
+#           file.write('map_Kd %s\n' % img.filepath.split('\\')[-1].split('/')[-1]) # Diffuse mapping image
+
+        elif mat: # No face image. If we have a material, search for an MTex image.
+            for mtex in mat.texture_slots:
+                if mtex and mtex.texture.type == 'IMAGE':
+                    try:
+                        filepath = copy_image(mtex.texture.image)
+#                       filepath = mtex.texture.image.filepath.split('\\')[-1].split('/')[-1]
+                        file.write('map_Kd %s\n' % repr(filepath)[1:-1]) # Diffuse mapping image
+                        break
+                    except:
+                        # Texture has no image though it's an image type, best ignore.
+                        pass
+
+        file.write('\n\n')
+
+    file.close()
+
+# XXX not used
+def copy_file(source, dest):
+    file = open(source, 'rb')
+    data = file.read()
+    file.close()
+
+    file = open(dest, 'wb')
+    file.write(data)
+    file.close()
+
+
+# XXX not used
+def copy_images(dest_dir):
+    if dest_dir[-1] != os.sep:
+        dest_dir += os.sep
+
+    # Get unique image names
+    uniqueImages = {}
+    for matname, mat, image in mtl_dict.values(): # Only use image name
+        # Get Texface images
+        if image:
+            uniqueImages[image] = image # Should use a set here.
+
+        # Get MTex images
+        if mat:
+            for mtex in mat.texture_slots:
+                if mtex and mtex.texture.type == 'IMAGE':
+                    image_tex = mtex.texture.image
+                    if image_tex:
+                        try:
+                            uniqueImages[image_tex] = image_tex
+                        except:
+                            pass
+
+    # Now copy images
+    copyCount = 0
+
+#   for bImage in uniqueImages.values():
+#       image_path = bpy.path.abspath(bImage.filepath)
+#       if bpy.sys.exists(image_path):
+#           # Make a name for the target path.
+#           dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
+#           if not bpy.utils.exists(dest_image_path): # Image isnt already there
+#               print('\tCopying "%s" > "%s"' % (image_path, dest_image_path))
+#               copy_file(image_path, dest_image_path)
+#               copyCount+=1
+
+#   paths= bpy.util.copy_images(uniqueImages.values(), dest_dir)
+
+    print('\tCopied %d images' % copyCount)
+
+
+def test_nurbs_compat(ob):
+    if ob.type != 'CURVE':
+        return False
+
+    for nu in ob.data.splines:
+        if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
+            return True
+
+    return False
+
+
+def write_nurb(file, ob, ob_mat):
+    tot_verts = 0
+    cu = ob.data
+
+    # use negative indices
+    for nu in cu.splines:
+        if nu.type == 'POLY':
+            DEG_ORDER_U = 1
+        else:
+            DEG_ORDER_U = nu.order_u - 1  # odd but tested to be correct
+
+        if nu.type == 'BEZIER':
+            print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
+            continue
+
+        if nu.point_count_v > 1:
+            print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
+            continue
+
+        if len(nu.points) <= DEG_ORDER_U:
+            print("\tWarning, vert count is not greater than order_u, skipping:", ob.name)
+            continue
+
+        pt_num = 0
+        do_closed = nu.use_cyclic_u
+        do_endpoints = (do_closed == 0) and nu.use_endpoint_u
+
+        for pt in nu.points:
+            pt = ob_mat * pt.co.copy().resize3D()
+            file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
+            pt_num += 1
+        tot_verts += pt_num
+
+        file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too
+        file.write('cstype bspline\n') # not ideal, hard coded
+        file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
+
+        curve_ls = [-(i+1) for i in range(pt_num)]
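+        # OBJ relative indices count back from the last vert written,
+        # e.g. pt_num == 4 gives curve_ls == [-1, -2, -3, -4]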
+
+        # 'curv' keyword
+        if do_closed:
+            if DEG_ORDER_U == 1:
+                pt_num += 1
+                curve_ls.append(-1)
+            else:
+                pt_num += DEG_ORDER_U
+                curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
+
+        file.write('curv 0.0 1.0 %s\n' % (' '.join([str(i) for i in curve_ls]))) # Blender has no U and V values for the curve
+
+        # 'parm' keyword
+        tot_parm = (DEG_ORDER_U + 1) + pt_num
+        tot_parm_div = float(tot_parm-1)
+        parm_ls = [(i/tot_parm_div) for i in range(tot_parm)]
+
+        if do_endpoints: # end points, force param
+            for i in range(DEG_ORDER_U+1):
+                parm_ls[i] = 0.0
+                parm_ls[-(1+i)] = 1.0
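+        # e.g. DEG_ORDER_U == 3, pt_num == 6 (no closure): tot_parm == 10, knots i/9 for i in 0..9,
+        # and with do_endpoints the first and last four knots are clamped to 0.0 and 1.0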
+
+        file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] ))
+
+        file.write('end\n')
+
+    return tot_verts
+
+def write_file(filepath, objects, scene,
+          EXPORT_TRI=False,
+          EXPORT_EDGES=False,
+          EXPORT_NORMALS=False,
+          EXPORT_NORMALS_HQ=False,
+          EXPORT_UV=True,
+          EXPORT_MTL=True,
+          EXPORT_COPY_IMAGES=False,
+          EXPORT_APPLY_MODIFIERS=True,
+          EXPORT_ROTX90=True,
+          EXPORT_BLEN_OBS=True,
+          EXPORT_GROUP_BY_OB=False,
+          EXPORT_GROUP_BY_MAT=False,
+          EXPORT_KEEP_VERT_ORDER=False,
+          EXPORT_POLYGROUPS=False,
+          EXPORT_CURVE_AS_NURBS=True):
+    '''
+    Basic write function. The context and options must already be set.
+    This can be accessed externally, e.g.
+    write_file( 'c:\\test\\foobar.obj', bpy.context.selected_objects, bpy.context.scene ) # Using default options.
+    '''
+
+    # XXX
+    import math
+
+    def veckey3d(v):
+        return round(v.x, 6), round(v.y, 6), round(v.z, 6)
+
+    def veckey2d(v):
+        return round(v[0], 6), round(v[1], 6)
+
+    def findVertexGroupName(face, vWeightMap):
+        """
+        Searches the vertexDict to see what groups is assigned to a given face.
+        We use a frequency system in order to sort out the name because a given vetex can
+        belong to two or more groups at the same time. To find the right name for the face
+        we list all the possible vertex group names with their frequency and then sort by
+        frequency in descend order. The top element is the one shared by the highest number
+        of vertices is the face's group
+        """
+        weightDict = {}
+        for vert_index in face.vertices:
+#       for vert in face:
+            vWeights = vWeightMap[vert_index]
+#           vWeights = vWeightMap[vert]
+            for vGroupName, weight in vWeights:
+                weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
+
+        if weightDict:
+            alist = [(weight,vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight
+            alist.sort()
+            return(alist[-1][1]) # highest value last
+        else:
+            return '(null)'
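+    # e.g. weightDict {'Arm': 2.5, 'Leg': 0.8} sorts to [(0.8, 'Leg'), (2.5, 'Arm')],
+    # so 'Arm' (the highest total weight) names the face's group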
+
+    print('OBJ Export path: %r' % filepath)
+    temp_mesh_name = '~tmp-mesh'
+
+    time1 = time.clock()
+#   time1 = sys.time()
+#   scn = Scene.GetCurrent()
+
+    file = open(filepath, "w")
+
+    # Write Header
+    file.write('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
+    file.write('# www.blender.org\n')
+
+    # Tell the obj file what material file to use.
+    if EXPORT_MTL:
+        mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
+        file.write('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1]) # filepath can contain non utf8 chars, use repr
+
+    if EXPORT_ROTX90:
+        mat_xrot90 = mathutils.Matrix.Rotation(-math.pi / 2, 4, 'X')
+
+    # Initialize totals, these are updated each object
+    totverts = totuvco = totno = 1
+
+    face_vert_index = 1
+
+    globalNormals = {}
+
+    # A Dict of Materials
+    # (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
+    mtl_dict = {}
+
+    # Get all meshes
+    for ob_main in objects:
+
+        # ignore dupli children
+        if ob_main.parent and ob_main.parent.dupli_type != 'NONE':
+            # XXX
+            print(ob_main.name, 'is a dupli child - ignoring')
+            continue
+
+        obs = []
+        if ob_main.dupli_type != 'NONE':
+            # XXX
+            print('creating dupli_list on', ob_main.name)
+            ob_main.create_dupli_list(scene)
+
+            obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
+
+            # XXX debug print
+            print(ob_main.name, 'has', len(obs), 'dupli children')
+        else:
+            obs = [(ob_main, ob_main.matrix_world)]
+
+        for ob, ob_mat in obs:
+
+            # Nurbs curve support
+            if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
+                if EXPORT_ROTX90:
+                    ob_mat = mat_xrot90 * ob_mat  # premultiply, matching the mesh branch below
+                totverts += write_nurb(file, ob, ob_mat)
+                continue
+            # END NURBS
+
+            if ob.type != 'MESH':
+                continue
+
+            me = ob.create_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')
+
+            if EXPORT_ROTX90:
+                me.transform(mat_xrot90 * ob_mat)
+            else:
+                me.transform(ob_mat)
+
+#           # Will work for non meshes now! :)
+#           me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
+#           if not me:
+#               continue
+
+            if EXPORT_UV:
+                faceuv = len(me.uv_textures) > 0
+                if faceuv:
+                    uv_layer = me.uv_textures.active.data[:]
+            else:
+                faceuv = False
+
+            me_verts = me.vertices[:]
+
+            # Make our own list so it can be sorted to reduce context switching
+            face_index_pairs = [(face, index) for index, face in enumerate(me.faces)]
+            # faces = [ f for f in me.faces ]
+
+            if EXPORT_EDGES:
+                edges = me.edges
+            else:
+                edges = []
+
+            if not (len(face_index_pairs) + len(edges) + len(me.vertices)): # Make sure there is something to write
+
+                # clean up
+                bpy.data.meshes.remove(me)
+
+                continue # don't bother with this mesh.
+
+            # XXX
+            # High Quality Normals
+            if EXPORT_NORMALS and face_index_pairs:
+                me.calc_normals()
+#               if EXPORT_NORMALS_HQ:
+#                   BPyMesh.meshCalcNormals(me)
+#               else:
+#                   # transforming normals is incorrect
+#                   # when the matrix is scaled,
+#                   # better to recalculate them
+#                   me.calcNormals()
+
+            materials = me.materials
+
+            materialNames = []
+            materialItems = [m for m in materials]
+            if materials:
+                for mat in materials:
+                    if mat:
+                        materialNames.append(mat.name)
+                    else:
+                        materialNames.append(None)
+                # Can't use a list comprehension because some materials are None.
+                # materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.
+
+            # There may be null materials here, which will mess up indices,
+            # but at least it will export; wait until Blender gets fixed.
+            materialNames.extend((16-len(materialNames)) * [None])
+            materialItems.extend((16-len(materialItems)) * [None])
+
+            # Sort by Material, then images
+            # so we don't context switch too often in the obj file.
+            if EXPORT_KEEP_VERT_ORDER:
+                pass
+            elif faceuv:
+                face_index_pairs.sort(key=lambda a: (a[0].material_index, hash(uv_layer[a[1]].image), a[0].use_smooth))
+            elif len(materials) > 1:
+                face_index_pairs.sort(key = lambda a: (a[0].material_index, a[0].use_smooth))
+            else:
+                # no materials
+                face_index_pairs.sort(key = lambda a: a[0].use_smooth)
+#           if EXPORT_KEEP_VERT_ORDER:
+#               pass
+#           elif faceuv:
+#               try:    faces.sort(key = lambda a: (a.mat, a.image, a.use_smooth))
+#               except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.use_smooth), (b.mat, b.image, b.use_smooth)))
+#           elif len(materials) > 1:
+#               try:    faces.sort(key = lambda a: (a.mat, a.use_smooth))
+#               except: faces.sort(lambda a,b: cmp((a.mat, a.use_smooth), (b.mat, b.use_smooth)))
+#           else:
+#               # no materials
+#               try:    faces.sort(key = lambda a: a.use_smooth)
+#               except: faces.sort(lambda a,b: cmp(a.use_smooth, b.use_smooth))
+
+            # Set the default mat to no material and no image.
+            contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
+            contextSmooth = None # Will either be true or false, set to an invalid value to force an initialization switch.
+
+            if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
+                name1 = ob.name
+                name2 = ob.data.name
+                if name1 == name2:
+                    obnamestring = fixName(name1)
+                else:
+                    obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
+
+                if EXPORT_BLEN_OBS:
+                    file.write('o %s\n' % obnamestring) # Write Object name
+                else: # if EXPORT_GROUP_BY_OB:
+                    file.write('g %s\n' % obnamestring)
+
+
+            # Vert
+            for v in me_verts:
+                file.write('v %.6f %.6f %.6f\n' % v.co[:])
+
+            # UV
+            if faceuv:
+                uv_face_mapping = [[0,0,0,0] for i in range(len(face_index_pairs))] # a bit of a waste for tri's :/
+
+                uv_dict = {} # maps rounded uv coords to their 'vt' index
+                uv_layer = me.uv_textures.active.data
+                for f, f_index in face_index_pairs:
+                    for uv_index, uv in enumerate(uv_layer[f_index].uv):
+                        uvkey = veckey2d(uv)
+                        try:
+                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
+                        except KeyError:
+                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
+                            file.write('vt %.6f %.6f\n' % uv[:])
+
+                uv_unique_count = len(uv_dict)
+#               del uv, uvkey, uv_dict, f_index, uv_index
+                # Only need uv_unique_count and uv_face_mapping
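+                # A sketch of the dedup above (illustrative): if two face corners share
+                # the rounded UV (0.5, 0.5), only one 'vt 0.500000 0.500000' line is
+                # written, and uv_face_mapping records the shared index for both corners.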
+
+            # NORMAL, Smooth/Non smoothed.
+            if EXPORT_NORMALS:
+                for f, f_index in face_index_pairs:
+                    if f.use_smooth:
+                        for v_idx in f.vertices:
+                            v = me_verts[v_idx]
+                            noKey = veckey3d(v.normal)
+                            if noKey not in globalNormals:
+                                globalNormals[noKey] = totno
+                                totno +=1
+                                file.write('vn %.6f %.6f %.6f\n' % noKey)
+                    else:
+                        # Hard, 1 normal from the face.
+                        noKey = veckey3d(f.normal)
+                        if noKey not in globalNormals:
+                            globalNormals[noKey] = totno
+                            totno +=1
+                            file.write('vn %.6f %.6f %.6f\n' % noKey)
+
+            if not faceuv:
+                f_image = None
+
+            # XXX
+            if EXPORT_POLYGROUPS:
+                # Retrieve the list of vertex groups
+                vertGroupNames = [g.name for g in ob.vertex_groups]
+
+                currentVGroup = ''
+                # Create a list indexed by vertex id, listing for each vertex the vertex groups it belongs to
+                vgroupsMap = [[] for _i in range(len(me_verts))]
+                for v_idx, v in enumerate(me.vertices):
+                    for g in v.groups:
+                        vgroupsMap[v_idx].append((vertGroupNames[g.group], g.weight))
+
+            for f, f_index in face_index_pairs:
+                f_smooth= f.use_smooth
+                f_mat = min(f.material_index, len(materialNames)-1)
+
+                if faceuv:
+                    tface = uv_layer[f_index]
+                    f_image = tface.image
+
+                # MAKE KEY
+                if faceuv and f_image: # Object is always true.
+                    key = materialNames[f_mat],  f_image.name
+                else:
+                    key = materialNames[f_mat],  None # No image, use None instead.
+
+                # Write the vertex group
+                if EXPORT_POLYGROUPS:
+                    if ob.vertex_groups:
+                        # find which vertex group the face belongs to
+                        theVGroup = findVertexGroupName(f, vgroupsMap)
+                        if theVGroup != currentVGroup:
+                            currentVGroup = theVGroup
+                            file.write('g %s\n' % theVGroup)
+
+                # CHECK FOR CONTEXT SWITCH
+                if key == contextMat:
+                    pass # Context already switched, don't do anything
+                else:
+                    if key[0] is None and key[1] is None:
+                        # Write a null material, since we know the context has changed.
+                        if EXPORT_GROUP_BY_MAT:
+                            # can be mat_image or (null)
+                            file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)))
+                        file.write('usemtl (null)\n') # mat, image
+
+                    else:
+                        mat_data= mtl_dict.get(key)
+                        if not mat_data:
+                            # First add to global dict so we can export to mtl
+                            # Then write mtl
+
+                            # Make a new name from the mat and image name,
+                            # converting any spaces to underscores with fixName.
+
+                            # If there is no image, don't bother adding it to the name
+                            if key[1] is None:
+                                mat_data = mtl_dict[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
+                            else:
+                                mat_data = mtl_dict[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
+
+                        if EXPORT_GROUP_BY_MAT:
+                            file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]) ) # can be mat_image or (null)
+
+                        file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
+
+                contextMat = key
+                if f_smooth != contextSmooth:
+                    if f_smooth: # was off, now on
+                        file.write('s 1\n')
+                        contextSmooth = f_smooth
+                    else: # was on, now off
+                        file.write('s off\n')
+                        contextSmooth = f_smooth
+
+                f_v_orig = [(vi, me_verts[v_idx]) for vi, v_idx in enumerate(f.vertices)]
+                
+                if not EXPORT_TRI or len(f_v_orig) == 3:
+                    f_v_iter = (f_v_orig, )
+                else:
+                    f_v_iter = (f_v_orig[0], f_v_orig[1], f_v_orig[2]), (f_v_orig[0], f_v_orig[2], f_v_orig[3])
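+                    # Illustrative: a quad with corners (0, 1, 2, 3) is fanned into
+                    # the triangles (0, 1, 2) and (0, 2, 3) when EXPORT_TRI is on.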
+
+                # support for triangulation
+                for f_v in f_v_iter:
+                    file.write('f')
+
+                    if faceuv:
+                        if EXPORT_NORMALS:
+                            if f_smooth: # Smoothed, use vertex normals
+                                for vi, v in f_v:
+                                    file.write( ' %d/%d/%d' % \
+                                                    (v.index + totverts,
+                                                     totuvco + uv_face_mapping[f_index][vi],
+                                                     globalNormals[ veckey3d(v.normal) ]) ) # vert, uv, normal
+
+                            else: # No smoothing, face normals
+                                no = globalNormals[ veckey3d(f.normal) ]
+                                for vi, v in f_v:
+                                    file.write( ' %d/%d/%d' % \
+                                                    (v.index + totverts,
+                                                     totuvco + uv_face_mapping[f_index][vi],
+                                                     no) ) # vert, uv, normal
+                        else: # No Normals
+                            for vi, v in f_v:
+                                file.write( ' %d/%d' % (\
+                                  v.index + totverts,\
+                                  totuvco + uv_face_mapping[f_index][vi])) # vert, uv
+
+                        face_vert_index += len(f_v)
+
+                    else: # No UV's
+                        if EXPORT_NORMALS:
+                            if f_smooth: # Smoothed, use vertex normals
+                                for vi, v in f_v:
+                                    file.write( ' %d//%d' %
+                                                (v.index + totverts, globalNormals[ veckey3d(v.normal) ]) )
+                            else: # No smoothing, face normals
+                                no = globalNormals[ veckey3d(f.normal) ]
+                                for vi, v in f_v:
+                                    file.write( ' %d//%d' % (v.index + totverts, no) )
+                        else: # No Normals
+                            for vi, v in f_v:
+                                file.write( ' %d' % (v.index + totverts) )
+
+                    file.write('\n')
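+                    # Example output (illustrative); indices are 1-based and global:
+                    #   f 1/1/1 2/2/2 3/3/3   (vert/uv/normal)
+                    #   f 1//1 2//2 3//3      (vert//normal, no UVs)
+                    #   f 1 2 3               (vert only)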
+
+            # Write edges.
+            if EXPORT_EDGES:
+                for ed in edges:
+                    if ed.is_loose:
+                        file.write('f %d %d\n' % (ed.vertices[0] + totverts, ed.vertices[1] + totverts))
+
+            # Make the indices global rather than per-mesh
+            totverts += len(me_verts)
+            if faceuv:
+                totuvco += uv_unique_count
+
+            # clean up
+            bpy.data.meshes.remove(me)
+
+        if ob_main.dupli_type != 'NONE':
+            ob_main.free_dupli_list()
+
+    file.close()
+
+
+    # Now we have all our materials, save them
+    if EXPORT_MTL:
+        write_mtl(scene, mtlfilepath, EXPORT_COPY_IMAGES, mtl_dict)
+#   if EXPORT_COPY_IMAGES:
+#       dest_dir = os.path.basename(filepath)
+# #         dest_dir = filepath
+# #         # Remove chars until we are just the path.
+# #         while dest_dir and dest_dir[-1] not in '\\/':
+# #             dest_dir = dest_dir[:-1]
+#       if dest_dir:
+#           copy_images(dest_dir, mtl_dict)
+#       else:
+#           print('\tError: "%s" could not be used as a base for an image path.' % filepath)
+
+    print("OBJ Export time: %.2f" % (time.clock() - time1))
+
+def _write(context, filepath,
+              EXPORT_TRI, # ok
+              EXPORT_EDGES,
+              EXPORT_NORMALS, # not yet
+              EXPORT_NORMALS_HQ, # not yet
+              EXPORT_UV, # ok
+              EXPORT_MTL,
+              EXPORT_COPY_IMAGES,
+              EXPORT_APPLY_MODIFIERS, # ok
+              EXPORT_ROTX90, # wrong
+              EXPORT_BLEN_OBS,
+              EXPORT_GROUP_BY_OB,
+              EXPORT_GROUP_BY_MAT,
+              EXPORT_KEEP_VERT_ORDER,
+              EXPORT_POLYGROUPS,
+              EXPORT_CURVE_AS_NURBS,
+              EXPORT_SEL_ONLY, # ok
+              EXPORT_ALL_SCENES, # XXX not working atm
+              EXPORT_ANIMATION): # Not used
+    
+    base_name, ext = os.path.splitext(filepath)
+    context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
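+    # Illustrative: for filepath "/tmp/cube.obj", scene "Scene" and frame 12 this
+    # becomes ['/tmp/cube', '_Scene', '_000012', '.obj'], joined later into
+    # '/tmp/cube_Scene_000012.obj' (scene/frame parts only when those options are on).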
+
+    orig_scene = context.scene
+
+    # Exit edit mode before exporting, so current object states are exported properly.
+    if bpy.ops.object.mode_set.poll():
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+#   if EXPORT_ALL_SCENES:
+#       export_scenes = bpy.data.scenes
+#   else:
+#       export_scenes = [orig_scene]
+
+    # XXX only exporting one scene atm since changing
+    # current scene is not possible.
+    # Brecht says that ideally in 2.5 we won't need such a function,
+    # allowing multiple scenes open at once.
+    export_scenes = [orig_scene]
+
+    # Export all scenes.
+    for scene in export_scenes:
+        #       scene.makeCurrent() # If already current, this is not slow.
+        #       context = scene.getRenderingContext()
+        orig_frame = scene.frame_current
+
+        if EXPORT_ALL_SCENES: # Add scene name into the context_name
+            context_name[1] = '_%s' % bpy.path.clean_name(scene.name) # WARNING: it's possible this could cause a name collision; we could fix that if we were feeling paranoid.
+
+        # Export an animation?
+        if EXPORT_ANIMATION:
+            scene_frames = range(scene.frame_start, scene.frame_end + 1) # Up to and including the end frame.
+        else:
+            scene_frames = [orig_frame] # Don't export an animation.
+
+        # Loop through all frames in the scene and export.
+        for frame in scene_frames:
+            if EXPORT_ANIMATION: # Add frame to the filepath.
+                context_name[2] = '_%.6d' % frame
+
+            scene.frame_set(frame, 0.0)
+            if EXPORT_SEL_ONLY:
+                objects = context.selected_objects
+            else:
+                objects = scene.objects
+
+            full_path = ''.join(context_name)
+
+            # Note: this can overwrite existing files when exporting frames.
+            # EXPORT THE FILE.
+            write_file(full_path, objects, scene,
+                  EXPORT_TRI,
+                  EXPORT_EDGES,
+                  EXPORT_NORMALS,
+                  EXPORT_NORMALS_HQ,
+                  EXPORT_UV,
+                  EXPORT_MTL,
+                  EXPORT_COPY_IMAGES,
+                  EXPORT_APPLY_MODIFIERS,
+                  EXPORT_ROTX90,
+                  EXPORT_BLEN_OBS,
+                  EXPORT_GROUP_BY_OB,
+                  EXPORT_GROUP_BY_MAT,
+                  EXPORT_KEEP_VERT_ORDER,
+                  EXPORT_POLYGROUPS,
+                  EXPORT_CURVE_AS_NURBS)
+
+        scene.frame_set(orig_frame, 0.0)
+
+    # Restore old active scene.
+#   orig_scene.makeCurrent()
+#   Window.WaitCursor(0)
+
+
+'''
+Currently the exporter lacks these features:
+* multiple scene export (only active scene is written)
+* particles
+'''
+
+
+def save(operator, context, filepath="",
+         use_triangles=False,
+         use_edges=True,
+         use_normals=False,
+         use_hq_normals=False,
+         use_uvs=True,
+         use_materials=True,
+         copy_images=False,
+         use_modifiers=True,
+         use_rotate_x90=True,
+         use_blen_objects=True,
+         group_by_object=False,
+         group_by_material=False,
+         keep_vertex_order=False,
+         use_vertex_groups=False,
+         use_nurbs=True,
+         use_selection=True,
+         use_all_scenes=False,
+         use_animation=False,
+         ):
+
+    _write(context, filepath,
+           EXPORT_TRI=use_triangles,
+           EXPORT_EDGES=use_edges,
+           EXPORT_NORMALS=use_normals,
+           EXPORT_NORMALS_HQ=use_hq_normals,
+           EXPORT_UV=use_uvs,
+           EXPORT_MTL=use_materials,
+           EXPORT_COPY_IMAGES=copy_images,
+           EXPORT_APPLY_MODIFIERS=use_modifiers,
+           EXPORT_ROTX90=use_rotate_x90,
+           EXPORT_BLEN_OBS=use_blen_objects,
+           EXPORT_GROUP_BY_OB=group_by_object,
+           EXPORT_GROUP_BY_MAT=group_by_material,
+           EXPORT_KEEP_VERT_ORDER=keep_vertex_order,
+           EXPORT_POLYGROUPS=use_vertex_groups,
+           EXPORT_CURVE_AS_NURBS=use_nurbs,
+           EXPORT_SEL_ONLY=use_selection,
+           EXPORT_ALL_SCENES=use_all_scenes,
+           EXPORT_ANIMATION=use_animation,
+           )
+
+    return {'FINISHED'}
diff --git a/io_scene_obj/import_obj.py b/io_scene_obj/import_obj.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c64c3d9b4d63c363d8ac4fd4f6d6d7008a5deb
--- /dev/null
+++ b/io_scene_obj/import_obj.py
@@ -0,0 +1,1219 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+# Contributors: Campbell Barton, Jiri Hnidek, Paolo Ciccone
+
+"""
+This script imports Wavefront OBJ files into Blender.
+
+Usage:
+Run this script from "File->Import" menu and then load the desired OBJ file.
+Note: this loads mesh objects and materials only; NURBS and curves are not supported.
+
+http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj
+"""
+
+import os
+import time
+import bpy
+import mathutils
+from mathutils.geometry import tesselate_polygon
+from io_utils import load_image, unpack_list, unpack_face_list
+
+
+def BPyMesh_ngon(from_data, indices, PREF_FIX_LOOPS= True):
+    '''
+    Takes a polyline of indices (fgon)
+    and returns a list of face index lists.
+    Designed to be used for importers that need indices for an fgon to create from existing verts.
+
+    from_data: either a mesh, or a list/tuple of vectors.
+    indices: a list of indices to use; this list is the ordered, closed polyline to fill, and can be a subset of the data given.
+    PREF_FIX_LOOPS: if this is enabled, polylines that use loops to make multiple polylines are dealt with correctly.
+    '''
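+    # Usage sketch (illustrative): filling a planar quad given as vectors,
+    #   BPyMesh_ngon([(0,0,0), (1,0,0), (1,1,0), (0,1,0)], [0, 1, 2, 3])
+    # returns triangle index lists such as [[0, 1, 2], [0, 2, 3]].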
+
+    if not set: # Need sets for loop fixing; 'set' is builtin in Python 3 so this never triggers.
+        PREF_FIX_LOOPS= False
+
+    Vector= mathutils.Vector
+    if not indices:
+        return []
+
+    def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6)
+    def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # Manhattan length of a vector, faster than length
+
+    def vert_treplet(v, i):
+        return v, rvec(v), i, mlen(v)
+
+    def ed_key_mlen(v1, v2):
+        if v1[3] > v2[3]:
+            return v2[1], v1[1]
+        else:
+            return v1[1], v2[1]
+
+
+    if not PREF_FIX_LOOPS:
+        '''
+        Normal single concave loop filling
+        '''
+        if type(from_data) in (tuple, list):
+            verts= [Vector(from_data[i]) for ii, i in enumerate(indices)]
+        else:
+            verts= [from_data.vertices[i].co for ii, i in enumerate(indices)]
+
+        for i in range(len(verts)-1, 0, -1): # same as reversed(range(1, len(verts)))
+            if verts[i]==verts[i-1]: # remove neighbouring double verts
+                verts.pop(i-1)
+
+        fill= tesselate_polygon([verts])
+
+    else:
+        '''
+        Separate this loop into multiple loops by finding edges that are used twice.
+        This is used by LightWave LWO files a lot.
+        '''
+
+        if type(from_data) in (tuple, list):
+            verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)]
+        else:
+            verts= [vert_treplet(from_data.vertices[i].co, ii) for ii, i in enumerate(indices)]
+
+        edges= [(i, i-1) for i in range(len(verts))]
+        if edges:
+            edges[0]= (0,len(verts)-1)
+
+        if not verts:
+            return []
+
+
+        edges_used= set()
+        edges_doubles= set()
+        # We need to check if any edges are used twice location based.
+        for ed in edges:
+            edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]])
+            if edkey in edges_used:
+                edges_doubles.add(edkey)
+            else:
+                edges_used.add(edkey)
+
+        # Store a list of unconnected loop segments split by double edges.
+        # will join later
+        loop_segments= []
+
+        v_prev= verts[0]
+        context_loop= [v_prev]
+        loop_segments= [context_loop]
+
+        for v in verts:
+            if v!=v_prev:
+                # Are we crossing an edge we removed?
+                if ed_key_mlen(v, v_prev) in edges_doubles:
+                    context_loop= [v]
+                    loop_segments.append(context_loop)
+                else:
+                    if context_loop and context_loop[-1][1]==v[1]:
+                        #raise "as"
+                        pass
+                    else:
+                        context_loop.append(v)
+
+                v_prev= v
+        # Now join loop segments
+
+        def join_seg(s1,s2):
+            if s2[-1][1]==s1[0][1]:
+                s1,s2= s2,s1
+            elif s1[-1][1]==s2[0][1]:
+                pass
+            else:
+                return False
+
+            # If we're still here, s1 and s2 are two segments of the same polyline
+            s1.pop() # remove the last vert from s1
+            s1.extend(s2) # add segment 2 to segment 1
+
+            if s1[0][1]==s1[-1][1]: # remove endpoints double
+                s1.pop()
+
+            s2[:]= [] # Empty segment s2 so we don't use it again.
+            return True
+
+        joining_segments= True
+        while joining_segments:
+            joining_segments= False
+            segcount= len(loop_segments)
+
+            for j in range(segcount-1, -1, -1): #reversed(range(segcount)):
+                seg_j= loop_segments[j]
+                if seg_j:
+                    for k in range(j-1, -1, -1): # reversed(range(j)):
+                        if not seg_j:
+                            break
+                        seg_k= loop_segments[k]
+
+                        if seg_k and join_seg(seg_j, seg_k):
+                            joining_segments= True
+
+        loop_list= loop_segments
+
+        for verts in loop_list:
+            while verts and verts[0][1]==verts[-1][1]:
+                verts.pop()
+
+        loop_list= [verts for verts in loop_list if len(verts)>2]
+        # DONE DEALING WITH LOOP FIXING
+
+
+        # vert mapping
+        vert_map= [None]*len(indices)
+        ii=0
+        for verts in loop_list:
+            if len(verts)>2:
+                for i, vert in enumerate(verts):
+                    vert_map[i+ii]= vert[2]
+                ii+=len(verts)
+
+        fill= tesselate_polygon([ [v[0] for v in loop] for loop in loop_list ])
+        # map to original indices
+        fill= [[vert_map[i] for i in reversed(f)] for f in fill]
+
+
+    if not fill:
+        print('Warning: cannot scanfill, falling back on a triangle fan.')
+        fill= [ [0, i-1, i] for i in range(2, len(indices)) ]
+    else:
+        # Use real scanfill.
+        # See if it's flipped the wrong way.
+        flip= None
+        for fi in fill:
+            if flip is not None:
+                break
+            for i, vi in enumerate(fi):
+                if vi==0 and fi[i-1]==1:
+                    flip= False
+                    break
+                elif vi==1 and fi[i-1]==0:
+                    flip= True
+                    break
+
+        if not flip:
+            for i, fi in enumerate(fill):
+                fill[i]= tuple([ii for ii in reversed(fi)])
+
+    return fill
+
+def line_value(line_split):
+    '''
+    Returns 1 string representing the value for this line.
+    None will be returned if there's only 1 word.
+    '''
+    length= len(line_split)
+    if length == 1:
+        return None
+
+    elif length == 2:
+        return line_split[1]
+
+    elif length > 2:
+        return ' '.join( line_split[1:] )
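+# Illustrative examples:
+#   line_value(['usemtl'])              -> None
+#   line_value(['usemtl', 'Mat'])       -> 'Mat'
+#   line_value(['newmtl', 'My', 'Mat']) -> 'My Mat'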
+
+
+def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
+    if '_' in imagepath:
+        image= load_image(imagepath.replace('_', ' '), DIR)
+        if image:
+            return image
+
+    image = load_image(imagepath, DIR)
+    if image:
+        return image
+
+    print("failed to load '%s' doesn't exist", imagepath)
+    return None
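+# Note: the '_' -> ' ' retry above exists because some exporters (3ds Max, per the
+# legacy comment below) replace spaces in image names with underscores, e.g.
+#   obj_image_load('my_tex.png', DIR, True)  # tries 'my tex.png' first, then 'my_tex.png'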
+
+# def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
+#     '''
+#     Mainly uses comprehensiveImageLoad
+#     but tries to replace '_' with ' ' for Max's exporter replaces spaces with underscores.
+#     '''
+
+#     if '_' in imagepath:
+#         image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
+#         if image: return image
+#         # Did the exporter rename the image?
+#         image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
+#         if image: return image
+
+#     # Return an image, placeholder if it dosnt exist
+#     image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH)
+#     return image
+
+
+def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH):
+    '''
+    Create all the used materials in this obj,
+    assign colors and images to the materials from all referenced material libs
+    '''
+    DIR= os.path.dirname(filepath)
+
+    #==================================================================================#
+    # This function sets textures defined in .mtl file                                 #
+    #==================================================================================#
+    def load_material_image(blender_material, context_material_name, imagepath, type):
+
+        texture= bpy.data.textures.new(name=type, type='IMAGE')
+
+        # Absolute path - c:\.. etc would work here
+        image = obj_image_load(imagepath, DIR, IMAGE_SEARCH)
+        has_data = False
+
+        if image:
+            texture.image = image
+            has_data = image.has_data
+
+        # Adds textures for materials (rendering)
+        if type == 'Kd':
+            if has_data and image.depth == 32:
+                # Image has alpha
+
+                mtex = blender_material.texture_slots.add()
+                mtex.texture = texture
+                mtex.texture_coords = 'UV'
+                mtex.use_map_color_diffuse = True
+                mtex.use_map_alpha = True
+
+                texture.mipmap = True
+                texture.interpolation = True
+                texture.use_alpha = True
+                blender_material.use_transparency = True
+                blender_material.alpha = 0.0
+            else:
+                mtex = blender_material.texture_slots.add()
+                mtex.texture = texture
+                mtex.texture_coords = 'UV'
+                mtex.use_map_color_diffuse = True
+
+            # adds textures to faces (Textured/Alt-Z mode)
+            # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func.
+            unique_material_images[context_material_name]= image, has_data # set the texface image
+
+        elif type == 'Ka':
+            mtex = blender_material.texture_slots.add()
+            mtex.texture = texture
+            mtex.texture_coords = 'UV'
+            mtex.use_map_ambient = True
+
+        elif type == 'Ks':
+            mtex = blender_material.texture_slots.add()
+            mtex.texture = texture
+            mtex.texture_coords = 'UV'
+            mtex.use_map_specular = True
+
+        elif type == 'Bump':
+            mtex = blender_material.texture_slots.add()
+            mtex.texture = texture
+            mtex.texture_coords = 'UV'
+            mtex.use_map_normal = True
+
+        elif type == 'D':
+            mtex = blender_material.texture_slots.add()
+            mtex.texture = texture
+            mtex.texture_coords = 'UV'
+            mtex.use_map_alpha = True
+            blender_material.use_transparency = True
+            blender_material.transparency_method = 'Z_TRANSPARENCY'
+            blender_material.alpha = 0.0
+            # TODO: unset diffuse material alpha if it has an alpha channel
+
+        elif type == 'refl':
+            mtex = blender_material.texture_slots.add()
+            mtex.texture = texture
+            mtex.texture_coords = 'UV'
+            mtex.use_map_reflect = True
+
+
+    # Add an MTL with the same name as the obj if no MTLs are specified.
+    temp_mtl = os.path.splitext((os.path.basename(filepath)))[0] + '.mtl'
+
+    if os.path.exists(os.path.join(DIR, temp_mtl)) and temp_mtl not in material_libs:
+        material_libs.append( temp_mtl )
+    del temp_mtl
+
+    #Create new materials
+    for name in unique_materials: # .keys()
+        if name is not None:
+            unique_materials[name]= bpy.data.materials.new(name)
+            unique_material_images[name]= None, False # assign None to all material images to start with, add to later.
+
+    unique_materials[None]= None
+    unique_material_images[None]= None, False
+
+    for libname in material_libs:
+        mtlpath= os.path.join(DIR, libname)
+        if not os.path.exists(mtlpath):
+            print ("\tError Missing MTL: '%s'" % mtlpath)
+        else:
+            #print '\t\tloading mtl: "%s"' % mtlpath
+            context_material= None
+            mtl= open(mtlpath, 'rU')
+            for line in mtl: #.xreadlines():
+                if line.startswith('newmtl'):
+                    context_material_name= line_value(line.split())
+                    if context_material_name in unique_materials:
+                        context_material = unique_materials[ context_material_name ]
+                    else:
+                        context_material = None
+
+                elif context_material:
+                    # we need an existing material to assign properties to.
+                    line_split= line.split()
+                    line_lower= line.lower().lstrip()
+                    if line_lower.startswith('ka'):
+                        context_material.mirror_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
+                    elif line_lower.startswith('kd'):
+                        context_material.diffuse_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
+                    elif line_lower.startswith('ks'):
+                        context_material.specular_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
+                    elif line_lower.startswith('ns'):
+                        context_material.specular_hardness = int((float(line_split[1]) * 0.51))
+                    elif line_lower.startswith('ni'): # Refraction index
+                        context_material.raytrace_transparency.ior = max(1, min(float(line_split[1]), 3))  # between 1 and 3
+                    elif line_lower.startswith('d') or line_lower.startswith('tr'):
+                        context_material.alpha = float(line_split[1])
+                        context_material.use_transparency = True
+                        context_material.transparency_method = 'Z_TRANSPARENCY'
+                    elif line_lower.startswith('map_ka'):
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'Ka')
+                    elif line_lower.startswith('map_ks'):
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'Ks')
+                    elif line_lower.startswith('map_kd'):
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'Kd')
+                    elif line_lower.startswith('map_bump'):
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'Bump')
+                    elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'):  # Alpha map - Dissolve
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'D')
+
+                    elif line_lower.startswith('refl'):  # reflectionmap
+                        img_filepath= line_value(line.split())
+                        if img_filepath:
+                            load_material_image(context_material, context_material_name, img_filepath, 'refl')
+            mtl.close()
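+            # Illustrative MTL snippet handled by the loop above:
+            #   newmtl Material
+            #   Kd 0.8 0.8 0.8
+            #   Ns 96.0
+            #   d 1.0
+            #   map_Kd textures/diffuse.png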
+
+
+
+
+def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
+    '''
+    Takes vert_loc and faces, and separates into multiple sets of
+    (verts_loc, faces, unique_materials, dataname)
+    '''
+
+    filename = os.path.splitext((os.path.basename(filepath)))[0]
+
+    if not SPLIT_OB_OR_GROUP:
+        # use the filename for the object name since we aren't chopping up the mesh.
+        return [(verts_loc, faces, unique_materials, filename)]
+
+    def key_to_name(key):
+        # fall back to the filename when there is no key
+        if not key:
+            return filename
+        else:
+            return key # assume it's a string; make sure this stays true if the splitting code is changed
+
+    # Return a key that makes the faces unique.
+    face_split_dict= {}
+
+    oldkey= -1 # initialize to a value that will never match the key
+
+    for face in faces:
+        key= face[4]
+
+        if oldkey != key:
+            # Check the key has changed.
+            try:
+                verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key]
+            except KeyError:
+                faces_split= []
+                verts_split= []
+                unique_materials_split= {}
+                vert_remap= [-1]*len(verts_loc)
+
+                face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap)
+
+            oldkey= key
+
+        face_vert_loc_indicies= face[0]
+
+        # Remap verts to new vert list and add where needed
+        for enum, i in enumerate(face_vert_loc_indicies):
+            if vert_remap[i] == -1:
+                new_index= len(verts_split)
+                vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time.
+                face_vert_loc_indicies[enum] = new_index # remap to the local index
+                verts_split.append( verts_loc[i] ) # add the vert to the local verts
+            else:
+                face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index
+
+        # track the materials this split uses (once per face, not per vert)
+        matname= face[2]
+        if matname and matname not in unique_materials_split:
+            unique_materials_split[matname] = unique_materials[matname]
+
+        faces_split.append(face)
+
+    # remove one of the items and reorder
+    return [(value[0], value[1], value[2], key_to_name(key)) for key, value in list(face_split_dict.items())]
+
+
+def create_mesh(new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname):
+    '''
+    Takes all the data gathered and generates a mesh, adding the new object to new_objects
+    deals with fgons, sharp edges and assigning materials
+    '''
+    if not has_ngons:
+        CREATE_FGONS= False
+
+    if unique_smooth_groups:
+        sharp_edges= {}
+        smooth_group_users = {context_smooth_group: {} for context_smooth_group in list(unique_smooth_groups.keys())}
+        context_smooth_group_old= -1
+
+    # Split fgons into tri's
+    fgon_edges= {} # Used for storing fgon keys
+    if CREATE_EDGES:
+        edges= []
+
+    context_object= None
+
+    # reverse loop through face indices
+    for f_idx in range(len(faces)-1, -1, -1):
+
+        face_vert_loc_indicies,\
+        face_vert_tex_indicies,\
+        context_material,\
+        context_smooth_group,\
+        context_object= faces[f_idx]
+
+        len_face_vert_loc_indicies = len(face_vert_loc_indicies)
+
+        if len_face_vert_loc_indicies==1:
+            faces.pop(f_idx) # can't add single-vert faces
+
+        elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines
+            if CREATE_EDGES:
+                # generators are better in python 2.4+ but can't be used in 2.3
+                # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) )
+                edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in range(len_face_vert_loc_indicies-1)] )
+
+            faces.pop(f_idx)
+        else:
+
+            # Smooth Group
+            if unique_smooth_groups and context_smooth_group:
+                # Is part of a smooth group and is a face
+                if context_smooth_group_old is not context_smooth_group:
+                    edge_dict= smooth_group_users[context_smooth_group]
+                    context_smooth_group_old= context_smooth_group
+
+                for i in range(len_face_vert_loc_indicies):
+                    i1= face_vert_loc_indicies[i]
+                    i2= face_vert_loc_indicies[i-1]
+                    if i1>i2: i1,i2= i2,i1
+
+                    try:
+                        edge_dict[i1,i2]+= 1
+                    except KeyError:
+                        edge_dict[i1,i2]=  1
+
+            # FGons into triangles
+            if has_ngons and len_face_vert_loc_indicies > 4:
+
+                ngon_face_indices= BPyMesh_ngon(verts_loc, face_vert_loc_indicies)
+                faces.extend(
+                    [(
+                    [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],
+                    [face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],
+                    context_material,
+                    context_smooth_group,
+                    context_object)
+                    for ngon in ngon_face_indices]
+                )
+
+                # edges to make fgons
+                if CREATE_FGONS:
+                    edge_users= {}
+                    for ngon in ngon_face_indices:
+                        for i in (0,1,2):
+                            i1= face_vert_loc_indicies[ngon[i  ]]
+                            i2= face_vert_loc_indicies[ngon[i-1]]
+                            if i1>i2: i1,i2= i2,i1
+
+                            try:
+                                edge_users[i1,i2]+=1
+                            except KeyError:
+                                edge_users[i1,i2]= 1
+
+                    for key, users in edge_users.items():
+                        if users>1:
+                            fgon_edges[key]= None
+
+                # the ngon was replaced by the triangles appended above, so pop the original face.
+                faces.pop(f_idx)
+
+
+    # Build sharp edges
+    if unique_smooth_groups:
+        for edge_dict in list(smooth_group_users.values()):
+            for key, users in list(edge_dict.items()):
+                if users==1: # This edge is on the boundary of a group
+                    sharp_edges[key]= None
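+        # Illustrative: an edge interior to a smooth group is shared by two faces
+        # (users == 2), while a boundary edge is counted once (users == 1) and is
+        # therefore marked sharp.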
+
+
+    # map the material names to an index
+    material_mapping = {name: i for i, name in enumerate(unique_materials)} # enumerate over unique_materials keys()
+
+    materials= [None] * len(unique_materials)
+
+    for name, index in list(material_mapping.items()):
+        materials[index]= unique_materials[name]
+
+    me= bpy.data.meshes.new(dataname)
+
+    # assign the materials to the mesh
+    for material in materials:
+        me.materials.append(material)
+
+    me.vertices.add(len(verts_loc))
+    me.faces.add(len(faces))
+
+    # verts_loc is a list of (x, y, z) tuples
+    me.vertices.foreach_set("co", unpack_list(verts_loc))
+
+    # faces is a list of (vert_indices, texco_indices, ...) tuples
+    # XXX faces should contain either 3 or 4 verts
+    # XXX no check for valid face indices
+    me.faces.foreach_set("vertices_raw", unpack_face_list([f[0] for f in faces]))
+
+    if verts_tex and me.faces:
+        me.uv_textures.new()
+
+    context_material_old= -1 # avoid a dict lookup
+    mat= 0 # in rare cases it may be un-initialized.
+    me_faces= me.faces
+
+    for i, face in enumerate(faces):
+        if len(face[0]) < 2:
+            pass #raise "bad face"
+        elif len(face[0])==2:
+            if CREATE_EDGES:
+                edges.append(face[0])
+        else:
+
+                blender_face = me.faces[i]
+
+                face_vert_loc_indicies,\
+                face_vert_tex_indicies,\
+                context_material,\
+                context_smooth_group,\
+                context_object= face
+
+
+
+                if context_smooth_group:
+                    blender_face.use_smooth = True
+
+                if context_material:
+                    if context_material_old is not context_material:
+                        mat= material_mapping[context_material]
+                        context_material_old= context_material
+
+                    blender_face.material_index= mat
+#                     blender_face.mat= mat
+
+
+                if verts_tex:
+
+                    blender_tface= me.uv_textures[0].data[i]
+
+                    if context_material:
+                        image, has_data = unique_material_images[context_material]
+                        if image: # Can be None if the material doesn't have an image.
+                            blender_tface.image = image
+                            blender_tface.use_image = True
+                            if has_data and image.depth == 32:
+                                blender_tface.blend_type = 'ALPHA'
+
+                    # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
+                    if len(face_vert_loc_indicies)==4:
+                        if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0:
+                            face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1]
+                    else: # length of 3
+                        if face_vert_loc_indicies[2]==0:
+                            face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0]
+                    # END EEEKADOODLE FIX
+
+                    # assign material, uv's and image
+                    blender_tface.uv1= verts_tex[face_vert_tex_indicies[0]]
+                    blender_tface.uv2= verts_tex[face_vert_tex_indicies[1]]
+                    blender_tface.uv3= verts_tex[face_vert_tex_indicies[2]]
+
+                    if len(face_vert_loc_indicies)==4:
+                        blender_tface.uv4= verts_tex[face_vert_tex_indicies[3]]
+
+#                     for ii, uv in enumerate(blender_face.uv):
+#                         uv.x, uv.y=  verts_tex[face_vert_tex_indicies[ii]]
+    del me_faces
+#     del ALPHA
+
+    if CREATE_EDGES and not edges:
+        CREATE_EDGES = False
+
+    if CREATE_EDGES:
+        me.edges.add(len(edges))
+
+        # edges should be a list of (a, b) tuples
+        me.edges.foreach_set("vertices", unpack_list(edges))
+#         me_edges.extend( edges )
+
+#     del me_edges
+
+    # Add edge faces.
+#     me_edges= me.edges
+
+    def edges_match(e1, e2):
+        return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0])
+
+    # XXX slow
+#     if CREATE_FGONS and fgon_edges:
+#         for fgon_edge in fgon_edges.keys():
+#             for ed in me.edges:
+#                 if edges_match(fgon_edge, ed.vertices):
+#                     ed.is_fgon = True
+
+#     if CREATE_FGONS and fgon_edges:
+#         FGON= Mesh.EdgeFlags.FGON
+#         for ed in me.findEdges( fgon_edges.keys() ):
+#             if ed is not None:
+#                 me_edges[ed].flag |= FGON
+#         del FGON
+
+    # XXX slow
+#     if unique_smooth_groups and sharp_edges:
+#         for sharp_edge in sharp_edges.keys():
+#             for ed in me.edges:
+#                 if edges_match(sharp_edge, ed.vertices):
+#                     ed.use_edge_sharp = True
+
+#     if unique_smooth_groups and sharp_edges:
+#         SHARP= Mesh.EdgeFlags.SHARP
+#         for ed in me.findEdges( sharp_edges.keys() ):
+#             if ed is not None:
+#                 me_edges[ed].flag |= SHARP
+#         del SHARP
+
+    me.update(calc_edges=CREATE_EDGES)
+#     me.calcNormals()
+
+    ob= bpy.data.objects.new("Mesh", me)
+    new_objects.append(ob)
+
+    # Create the vertex groups. No need to have the flag passed here since we test for the
+    # content of the vertex_groups. If the user selects to NOT have vertex groups saved then
+    # the following test will never run
+    for group_name, group_indicies in vertex_groups.items():
+        group= ob.vertex_groups.new(group_name)
+        ob.vertex_groups.assign(group_indicies, group, 1.0, 'REPLACE')
+
+
+def create_nurbs(context_nurbs, vert_loc, new_objects):
+    '''
+    Add nurbs object to blender, only support one type at the moment
+    '''
+    deg = context_nurbs.get('deg', (3,))
+    curv_range = context_nurbs.get('curv_range')
+    curv_idx = context_nurbs.get('curv_idx', [])
+    parm_u = context_nurbs.get('parm_u', [])
+    parm_v = context_nurbs.get('parm_v', [])
+    name = context_nurbs.get('name', 'ObjNurb')
+    cstype = context_nurbs.get('cstype')
+
+    if cstype is None:
+        print('\tWarning, cstype not found')
+        return
+    if cstype != 'bspline':
+        print('\tWarning, cstype is not supported (only bspline)')
+        return
+    if not curv_idx:
+        print('\tWarning, curv argument empty or not set')
+        return
+    if len(deg) > 1 or parm_v:
+        print('\tWarning, surfaces not supported')
+        return
+
+    cu = bpy.data.curves.new(name, 'CURVE')
+    cu.dimensions = '3D'
+
+    nu = cu.splines.new('NURBS')
+    nu.points.add(len(curv_idx) - 1) # a point is added to start with
+    nu.points.foreach_set("co", [co_axis for vt_idx in curv_idx for co_axis in (vert_loc[vt_idx] + (1.0,))])
+
+    nu.order_u = deg[0] + 1
+
+    # derive the endpoint flag from the knot values
+    if curv_range and len(parm_u) > deg[0]+1:
+        do_endpoints = True
+        for i in range(deg[0]+1):
+
+            if abs(parm_u[i]-curv_range[0]) > 0.0001:
+                do_endpoints = False
+                break
+
+            if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001:
+                do_endpoints = False
+                break
+
+    else:
+        do_endpoints = False
+
+    if do_endpoints:
+        nu.use_endpoint_u = True
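+    # Illustrative: a clamped cubic curve repeats the end knots order_u = deg+1
+    # times, e.g. parm_u = [0, 0, 0, 0, 1, 2, 3, 3, 3, 3] with curv_range = (0, 3)
+    # passes the tolerance checks above and enables use_endpoint_u.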
+
+
+    # close
+    '''
+    do_closed = False
+    if len(parm_u) > deg[0]+1:
+        for i in xrange(deg[0]+1):
+            #print curv_idx[i], curv_idx[-(i+1)]
+
+            if curv_idx[i]==curv_idx[-(i+1)]:
+                do_closed = True
+                break
+
+    if do_closed:
+        nu.use_cyclic_u = True
+    '''
+    
+    ob= bpy.data.objects.new("Nurb", cu)
+
+    new_objects.append(ob)
+
+
+def strip_slash(line_split):
+    if line_split[-1][-1]== '\\':
+        if len(line_split[-1])==1:
+            line_split.pop() # remove the \ item
+        else:
+            line_split[-1]= line_split[-1][:-1] # remove the \ from the end of the last item
+        return True
+    return False
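+# Illustrative: for the continued line 'f 1 2 3 \' split into
+# ['f', '1', '2', '3', '\\'], strip_slash() pops the lone backslash and returns
+# True; for 'f 1 2 3\' it trims the backslash off the final '3\' instead.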
+
+
+
+def get_float_func(filepath):
+    '''
+    find the float function for this obj file
+    - whether to replace commas or not
+    '''
+    file= open(filepath, 'rU')
+    for line in file: #.xreadlines():
+        line = line.lstrip()
+        if line.startswith('v'): # vn vt v
+            if ',' in line:
+                return lambda f: float(f.replace(',', '.'))
+            elif '.' in line:
+                return float
+
+    # in case all vert values were ints
+    return float
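+# Illustrative: a file with vertex lines like 'v 1,0 2,5 0,0' (comma decimal
+# separator, written under some locales) yields a converter mapping '1,0' -> 1.0;
+# otherwise the builtin float is returned.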
+
+def load(operator, context, filepath,
+         CLAMP_SIZE= 0.0,
+         CREATE_FGONS= True,
+         CREATE_SMOOTH_GROUPS= True,
+         CREATE_EDGES= True,
+         SPLIT_OBJECTS= True,
+         SPLIT_GROUPS= True,
+         ROTATE_X90= True,
+         IMAGE_SEARCH=True,
+         POLYGROUPS=False):
+    '''
+    Called by the user interface or another script.
+    load_obj(path) - should give acceptable results.
+    This function parses the file and sends the data off
+        to be split into objects and then converted into mesh objects
+    '''
+    print('\nimporting obj %r' % filepath)
+
+    if SPLIT_OBJECTS or SPLIT_GROUPS:
+        POLYGROUPS = False
+
+    time_main= time.time()
+#     time_main= sys.time()
+
+    verts_loc= []
+    verts_tex= []
+    faces= [] # tuples of the faces
+    material_libs= [] # filenames of material libs this uses
+    vertex_groups = {} # when POLYGROUPS is true
+
+    # Get the string to float conversion func for this file- is 'float' for almost all files.
+    float_func= get_float_func(filepath)
+
+    # Context variables
+    context_material= None
+    context_smooth_group= None
+    context_object= None
+    context_vgroup = None
+
+    # Nurbs
+    context_nurbs = {}
+    nurbs = []
+    context_parm = '' # used by nurbs too but could be used elsewhere
+
+    has_ngons= False
+    # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0
+
+    # Until we can use sets
+    unique_materials= {}
+    unique_material_images= {}
+    unique_smooth_groups= {}
+    # unique_obects= {} - no use for this variable since the objects are stored in the face.
+
+    # When a line ends with a backslash the element continues on the next line;
+    # since we read the file line by line we can't look ahead,
+    # so we track which element (if any) is being continued.
+    context_multi_line= ''
+
+    print("\tparsing obj file...")
+    time_sub= time.time()
+#     time_sub= sys.time()
+
+    file= open(filepath, 'rU')
+    for line in file: #.xreadlines():
+        line = line.lstrip() # rare cases there is white space at the start of the line
+
+        if line.startswith('v '):
+            line_split= line.split()
+            # rotate X90: (x,-z,y)
+            verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) )
+
+        elif line.startswith('vn '):
+            pass
+
+        elif line.startswith('vt '):
+            line_split= line.split()
+            verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) )
+
+        # Handle face lines (as faces) and the second+ lines of a multiline face here
+        # use 'f' not 'f ' because some OBJs (very rarely) have 'fo ' for faces
+        elif line.startswith('f') or context_multi_line == 'f':
+
+            if context_multi_line:
+                # continuation of a multiline face: reuse face_vert_loc_indicies and face_vert_tex_indicies defined when the face started
+                line_split= line.split()
+
+            else:
+                line_split= line[2:].split()
+                face_vert_loc_indicies= []
+                face_vert_tex_indicies= []
+
+                # Instance a face
+                faces.append((\
+                face_vert_loc_indicies,\
+                face_vert_tex_indicies,\
+                context_material,\
+                context_smooth_group,\
+                context_object\
+                ))
+
+            if strip_slash(line_split):
+                context_multi_line = 'f'
+            else:
+                context_multi_line = ''
+
+            for v in line_split:
+                obj_vert= v.split('/')
+
+                vert_loc_index= int(obj_vert[0])-1
+                # Add the vertex to the current group
+                # *warning*, this won't work for files that have groups defined around verts
+                if POLYGROUPS and context_vgroup:
+                    vertex_groups[context_vgroup].append(vert_loc_index)
+
+                # Make relative negative vert indices absolute
+                if vert_loc_index < 0:
+                    vert_loc_index= len(verts_loc) + vert_loc_index + 1
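+                # Illustrative: with 8 verts read so far, OBJ index -1 (the most
+                # recent vert) becomes the 0-based index 7 here.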
+
+                face_vert_loc_indicies.append(vert_loc_index)
+
+                if len(obj_vert)>1 and obj_vert[1]:
+                    # formatting for faces with normals and textures is
+                    # loc_index/tex_index/nor_index
+
+                    vert_tex_index= int(obj_vert[1])-1
+                    # Make relative negative vert indices absolute
+                    if vert_tex_index < 0:
+                        vert_tex_index= len(verts_tex) + vert_tex_index + 1
+
+                    face_vert_tex_indicies.append(vert_tex_index)
+                else:
+                    # dummy
+                    face_vert_tex_indicies.append(0)
+
+            if len(face_vert_loc_indicies) > 4:
+                has_ngons= True
+
+        elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'):
+            # very similar to the face load function above with some parts removed
+
+            if context_multi_line:
+                # continuation of a multiline element: reuse the index lists defined when it started
+                line_split= line.split()
+
+            else:
+                line_split= line[2:].split()
+                face_vert_loc_indicies= []
+                face_vert_tex_indicies= []
+
+                # Instance a face
+                faces.append((\
+                face_vert_loc_indicies,\
+                face_vert_tex_indicies,\
+                context_material,\
+                context_smooth_group,\
+                context_object\
+                ))
+
+            if strip_slash(line_split):
+                context_multi_line = 'l'
+            else:
+                context_multi_line = ''
+
+            isline= line.startswith('l')
+
+            for v in line_split:
+                vert_loc_index= int(v)-1
+
+                # Make relative negative vert indices absolute
+                if vert_loc_index < 0:
+                    vert_loc_index= len(verts_loc) + vert_loc_index + 1
+
+                face_vert_loc_indicies.append(vert_loc_index)
+
+        elif line.startswith('s'):
+            if CREATE_SMOOTH_GROUPS:
+                context_smooth_group= line_value(line.split())
+                if context_smooth_group=='off':
+                    context_smooth_group= None
+                elif context_smooth_group: # is not None
+                    unique_smooth_groups[context_smooth_group]= None
+
+        elif line.startswith('o'):
+            if SPLIT_OBJECTS:
+                context_object= line_value(line.split())
+                # unique_objects[context_object] = None
+
+        elif line.startswith('g'):
+            if SPLIT_GROUPS:
+                context_object= line_value(line.split())
+                # print('context_object', context_object)
+                # unique_objects[context_object] = None
+            elif POLYGROUPS:
+                context_vgroup = line_value(line.split())
+                if context_vgroup and context_vgroup != '(null)':
+                    vertex_groups.setdefault(context_vgroup, [])
+                else:
+                    context_vgroup = None  # don't assign a vgroup
+
+        elif line.startswith('usemtl'):
+            context_material= line_value(line.split())
+            unique_materials[context_material]= None
+        elif line.startswith('mtllib'):
+            # a line can list several .mtl filenames and 'mtllib' can appear
+            # more than once, so keep only one occurrence of each library
+            material_libs = list(set(material_libs) | set(line.split()[1:]))
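+            # e.g. a (hypothetical) "mtllib a.mtl b.mtl" adds both names once,
+            # and a repeated "mtllib a.mtl" later adds nothing new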
+
+        # Nurbs support
+        elif line.startswith('cstype '):
+            context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline'
+        elif line.startswith('curv ') or context_multi_line == 'curv':
+            line_split= line.split()
+
+            curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', [])  # in case we're multi-line
+
+            if not context_multi_line:
+                context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2])
+                line_split[0:3] = [] # remove first 3 items
+
+            if strip_slash(line_split):
+                context_multi_line = 'curv'
+            else:
+                context_multi_line = ''
+
+            for i in line_split:
+                vert_loc_index = int(i)-1
+
+                if vert_loc_index < 0:
+                    vert_loc_index= len(verts_loc) + vert_loc_index + 1
+
+                curv_idx.append(vert_loc_index)
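+            # e.g. a (hypothetical) "curv 0.0 1.0 1 2 3 4" stores the range
+            # (0.0, 1.0) and the zero-based control point indices [0, 1, 2, 3]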
+
+        elif line.startswith('parm') or context_multi_line == 'parm':
+            line_split= line.split()
+
+            if context_multi_line:
+                context_multi_line = ''
+            else:
+                context_parm = line_split[1]
+                line_split[0:2] = [] # remove first 2
+
+            if strip_slash(line_split):
+                context_multi_line = 'parm'
+            else:
+                context_multi_line = ''
+
+            if context_parm.lower() == 'u':
+                context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] )
+            elif context_parm.lower() == 'v':  # surfaces not supported yet
+                context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] )
+            # else: # may want to support other parm's ?
+
+        elif line.startswith('deg '):
+            context_nurbs['deg']= [int(i) for i in line.split()[1:]]
+        elif line.startswith('end'):
+            # Add the nurbs curve
+            if context_object:
+                context_nurbs['name'] = context_object
+            nurbs.append(context_nurbs)
+            context_nurbs = {}
+            context_parm = ''
+
+        # How to use usemap? deprecated?
+        # elif line.startswith('usema'):  # usemap or usemat
+        #     context_image = line_value(line.split())
+
+    file.close()
+    time_new= time.time()
+#     time_new= sys.time()
+    print('%.4f sec' % (time_new-time_sub))
+    time_sub= time_new
+
+
+    print('\tloading materials and images...')
+    create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH)
+
+    time_new= time.time()
+#     time_new= sys.time()
+    print('%.4f sec' % (time_new-time_sub))
+    time_sub= time_new
+
+    if not ROTATE_X90:
+        verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc]
+
+    # deselect all
+    bpy.ops.object.select_all(action='DESELECT')
+
+    scene = context.scene
+#     scn.objects.selected = []
+    new_objects= [] # put new objects here
+
+    print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) ))
+    # Split the mesh by objects/materials
+    SPLIT_OB_OR_GROUP = bool(SPLIT_OBJECTS or SPLIT_GROUPS)
+
+    for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
+        # Create meshes from the data, warning 'vertex_groups' wont support splitting
+        create_mesh(new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname)
+
+    # nurbs support
+    for context_nurbs in nurbs:
+        create_nurbs(context_nurbs, verts_loc, new_objects)
+
+    # Create new obj
+    for obj in new_objects:
+        base = scene.objects.link(obj)
+        base.select = True
+
+    scene.update()
+
+
+    axis_min= [ 1000000000]*3
+    axis_max= [-1000000000]*3
+
+#     if CLAMP_SIZE:
+#         # Get all object bounds
+#         for ob in new_objects:
+#             for v in ob.getBoundBox():
+#                 for axis, value in enumerate(v):
+#                     if axis_min[axis] > value:    axis_min[axis]= value
+#                     if axis_max[axis] < value:    axis_max[axis]= value
+
+#         # Scale objects
+#         max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2])
+#         scale= 1.0
+
+#         while CLAMP_SIZE < max_axis * scale:
+#             scale= scale/10.0
+
+#         for ob in new_objects:
+#             ob.setSize(scale, scale, scale)
+
+    # Better rotate the vert locations
+    #if not ROTATE_X90:
+    #    for ob in new_objects:
+    #        ob.RotX = -1.570796326794896558
+
+    time_new= time.time()
+#    time_new= sys.time()
+
+    print('finished importing: %r in %.4f sec.' % (filepath, (time_new-time_main)))
+    return {'FINISHED'}
+
+
+# NOTES (all line numbers refer to 2.4x import_obj.py, not this file)
+# check later: line 489
+# can convert now: edge flags, edges: lines 508-528
+# ngon (uses python module BPyMesh): 384-414
+# NEXT clamp size: get bound box with RNA
+# get back to l 140 (here)
+# search image in bpy.config.textureDir - load_image
+# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load)
+# bitmask won't work? - 132
+# uses bpy.sys.time()
+
+if __name__ == "__main__":
+    register()
diff --git a/io_scene_x3d/__init__.py b/io_scene_x3d/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..67d35ce9c0db7dc0cf1b644b78f0b1e306cfadc9
--- /dev/null
+++ b/io_scene_x3d/__init__.py
@@ -0,0 +1,96 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+    "name": "Web3D X3D/VRML format",
+    "author": "Campbell Barton, Bart",
+    "location": "File > Import-Export",
+    "description": "Import-Export X3D, Import VRML",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/Web3D",
+    "tracker_url": "",
+    "support": 'OFFICIAL',
+    "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+    import imp
+    if "export_x3d" in locals():
+        imp.reload(export_x3d)
+
+
+import bpy
+from bpy.props import *
+from io_utils import ImportHelper, ExportHelper
+
+
+class ImportX3D(bpy.types.Operator, ImportHelper):
+    '''Load a X3D/VRML file'''
+    bl_idname = "import_scene.x3d"
+    bl_label = "Import X3D/VRML"
+
+    filename_ext = ".x3d"
+    filter_glob = StringProperty(default="*.x3d;*.wrl", options={'HIDDEN'})
+
+    def execute(self, context):
+        from . import import_x3d
+        return import_x3d.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class ExportX3D(bpy.types.Operator, ExportHelper):
+    '''Export selection to Extensible 3D file (.x3d)'''
+    bl_idname = "export_scene.x3d"
+    bl_label = 'Export X3D'
+
+    filename_ext = ".x3d"
+    filter_glob = StringProperty(default="*.x3d", options={'HIDDEN'})
+
+    use_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Use transformed mesh data from each object", default=True)
+    use_triangulate = BoolProperty(name="Triangulate", description="Triangulate quads.", default=False)
+    use_compress = BoolProperty(name="Compress", description="GZip the resulting file, requires a full python install", default=False)
+
+    def execute(self, context):
+        from . import export_x3d
+        return export_x3d.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+
+def menu_func_import(self, context):
+    self.layout.operator(ImportX3D.bl_idname, text="X3D Extensible 3D (.x3d/.wrl)")
+
+
+def menu_func_export(self, context):
+    self.layout.operator(ExportX3D.bl_idname, text="X3D Extensible 3D (.x3d)")
+
+
+def register():
+    bpy.types.INFO_MT_file_import.append(menu_func_import)
+    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+    bpy.types.INFO_MT_file_import.remove(menu_func_import)
+    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
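+# Example console usage once the add-on is enabled (hypothetical paths):
+#   bpy.ops.import_scene.x3d(filepath="//model.x3d")
+#   bpy.ops.export_scene.x3d(filepath="//model.x3d", use_triangulate=True)
+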
+# NOTES
+# - blender version is hardcoded
+
+if __name__ == "__main__":
+    register()
diff --git a/io_scene_x3d/export_x3d.py b/io_scene_x3d/export_x3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..c420b0cddd8acf26bd25d06d96336a821ef8d653
--- /dev/null
+++ b/io_scene_x3d/export_x3d.py
@@ -0,0 +1,847 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Contributors: bart:neeneenee*de, http://www.neeneenee.de/vrml, Campbell Barton
+
+"""
+This script exports to X3D format.
+
+Usage:
+Run this script from "File->Export" menu.  A pop-up will ask whether you
+want to export only selected or all relevant objects.
+
+Known issues:
+    Doesn't handle multiple materials (don't use material indices);
+    Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);
+    Can't get the texture array associated with a material (as opposed to the UV ones).
+"""
+
+import math
+import os
+
+import bpy
+import mathutils
+
+from io_utils import create_derived_objects, free_derived_objects
+
+
+def round_color(col, cp):
+    return tuple([round(max(min(c, 1.0), 0.0), cp) for c in col])
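+    # e.g. round_color((1.2, -0.1, 0.5), 3) gives (1.0, 0.0, 0.5):
+    # values are clamped to [0.0, 1.0] before rounding to 'cp' decimals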
+
+
+def matrix_direction(mtx):
+    return (mathutils.Vector((0.0, 0.0, -1.0)) * mtx.rotation_part()).normalize()[:]
+
+
+##########################################################
+# Functions for writing output file
+##########################################################
+
+
+class x3d_class:
+
+    def __init__(self, filepath):
+        #--- public you can change these ---
+        self.proto = 1
+        self.billnode = 0
+        self.halonode = 0
+        self.collnode = 0
+        self.verbose = 2	 # level of verbosity in console 0-none, 1-some, 2-most
+        self.cp = 3		  # decimals for material color values	 0.000 - 1.000
+        self.vp = 3		  # decimals for vertex coordinate values  0.000 - n.000
+        self.tp = 3		  # decimals for texture coordinate values 0.000 - 1.000
+        self.it = 3
+
+        self.global_matrix = mathutils.Matrix.Rotation(-(math.pi / 2.0), 4, 'X')
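+        # -90 degrees about X maps Blender's Z-up axes onto X3D's Y-up convention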
+
+        #--- class private don't touch ---
+        self.indentLevel = 0  # keeps track of current indenting
+        self.filepath = filepath
+        self.file = None
+        if filepath.lower().endswith('.x3dz'):
+            try:
+                import gzip
+                self.file = gzip.open(filepath, "w")
+            except:
+                print("failed to import compression modules, exporting uncompressed")
+                self.filepath = filepath[:-1]  # remove trailing z
+
+        if self.file is None:
+            self.file = open(self.filepath, "w", encoding='utf8')
+
+        self.bNav = 0
+        self.nodeID = 0
+        self.namesReserved = ("Anchor", "Appearance", "Arc2D", "ArcClose2D", "AudioClip", "Background", "Billboard",
+                             "BooleanFilter", "BooleanSequencer", "BooleanToggle", "BooleanTrigger", "Box", "Circle2D",
+                             "Collision", "Color", "ColorInterpolator", "ColorRGBA", "component", "Cone", "connect",
+                             "Contour2D", "ContourPolyline2D", "Coordinate", "CoordinateDouble", "CoordinateInterpolator",
+                             "CoordinateInterpolator2D", "Cylinder", "CylinderSensor", "DirectionalLight", "Disk2D",
+                             "ElevationGrid", "EspduTransform", "EXPORT", "ExternProtoDeclare", "Extrusion", "field",
+                             "fieldValue", "FillProperties", "Fog", "FontStyle", "GeoCoordinate", "GeoElevationGrid",
+                             "GeoLocationLocation", "GeoLOD", "GeoMetadata", "GeoOrigin", "GeoPositionInterpolator",
+                             "GeoTouchSensor", "GeoViewpoint", "Group", "HAnimDisplacer", "HAnimHumanoid", "HAnimJoint",
+                             "HAnimSegment", "HAnimSite", "head", "ImageTexture", "IMPORT", "IndexedFaceSet",
+                             "IndexedLineSet", "IndexedTriangleFanSet", "IndexedTriangleSet", "IndexedTriangleStripSet",
+                             "Inline", "IntegerSequencer", "IntegerTrigger", "IS", "KeySensor", "LineProperties", "LineSet",
+                             "LoadSensor", "LOD", "Material", "meta", "MetadataDouble", "MetadataFloat", "MetadataInteger",
+                             "MetadataSet", "MetadataString", "MovieTexture", "MultiTexture", "MultiTextureCoordinate",
+                             "MultiTextureTransform", "NavigationInfo", "Normal", "NormalInterpolator", "NurbsCurve",
+                             "NurbsCurve2D", "NurbsOrientationInterpolator", "NurbsPatchSurface",
+                             "NurbsPositionInterpolator", "NurbsSet", "NurbsSurfaceInterpolator", "NurbsSweptSurface",
+                             "NurbsSwungSurface", "NurbsTextureCoordinate", "NurbsTrimmedSurface", "OrientationInterpolator",
+                             "PixelTexture", "PlaneSensor", "PointLight", "PointSet", "Polyline2D", "Polypoint2D",
+                             "PositionInterpolator", "PositionInterpolator2D", "ProtoBody", "ProtoDeclare", "ProtoInstance",
+                             "ProtoInterface", "ProximitySensor", "ReceiverPdu", "Rectangle2D", "ROUTE", "ScalarInterpolator",
+                             "Scene", "Script", "Shape", "SignalPdu", "Sound", "Sphere", "SphereSensor", "SpotLight", "StaticGroup",
+                             "StringSensor", "Switch", "Text", "TextureBackground", "TextureCoordinate", "TextureCoordinateGenerator",
+                             "TextureTransform", "TimeSensor", "TimeTrigger", "TouchSensor", "Transform", "TransmitterPdu",
+                             "TriangleFanSet", "TriangleSet", "TriangleSet2D", "TriangleStripSet", "Viewpoint", "VisibilitySensor",
+                             "WorldInfo", "X3D", "XvlShell", "VertexShader", "FragmentShader", "MultiShaderAppearance", "ShaderAppearance")
+
+        self.namesFog = ("", "LINEAR", "EXPONENTIAL", "")
+
+##########################################################
+# Writing nodes routines
+##########################################################
+
+    def writeHeader(self):
+        #bfile = sys.expandpath( Blender.Get('filepath') ).replace('<', '&lt').replace('>', '&gt')
+        bfile = repr(os.path.basename(self.filepath).replace('<', '&lt;').replace('>', '&gt;'))[1:-1]  # use outfile name
+        self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
+        self.file.write("<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n")
+        self.file.write("<X3D version=\"3.0\" profile=\"Immersive\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\">\n")
+        self.file.write("<head>\n")
+        self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % bfile)
+        # self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % sys.basename(bfile))
+        self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % bpy.app.version_string)
+        # self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % Blender.Get('version'))
+        self.file.write("\t<meta name=\"translator\" content=\"X3D exporter v1.55 (2006/01/17)\" />\n")
+        self.file.write("</head>\n")
+        self.file.write("<Scene>\n")
+
+    # This functionality is poorly defined, disabling for now - campbell
+    '''
+    def writeScript(self):
+        textEditor = Blender.Text.Get()
+        alltext = len(textEditor)
+        for i in xrange(alltext):
+            nametext = textEditor[i].name
+            nlines = textEditor[i].getNLines()
+            if (self.proto == 1):
+                if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None):
+                    nalllines = len(textEditor[i].asLines())
+                    alllines = textEditor[i].asLines()
+                    for j in xrange(nalllines):
+                        self.write_indented(alllines[j] + "\n")
+            elif (self.proto == 0):
+                if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None):
+                    nalllines = len(textEditor[i].asLines())
+                    alllines = textEditor[i].asLines()
+                    for j in xrange(nalllines):
+                        self.write_indented(alllines[j] + "\n")
+        self.write_indented("\n")
+    '''
+
+    def writeViewpoint(self, ob, mat, scene):
+        loc, quat, scale = mat.decompose()
+        self.file.write("<Viewpoint DEF=\"%s\" " % (self.cleanStr(ob.name)))
+        self.file.write("description=\"%s\" " % (ob.name))
+        self.file.write("centerOfRotation=\"0 0 0\" ")
+        self.file.write("position=\"%3.2f %3.2f %3.2f\" " % loc[:])
+        self.file.write("orientation=\"%3.2f %3.2f %3.2f %3.2f\" " % (quat.axis[:] + (quat.angle, )))
+        self.file.write("fieldOfView=\"%.3f\" " % ob.data.angle)
+        self.file.write(" />\n\n")
+
+    def writeFog(self, world):
+        if world:
+            mtype = world.mist_settings.falloff
+            mparam = world.mist_settings
+        else:
+            return
+        if mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC':
+            mtype = 1 if mtype == 'LINEAR' else 2
+            self.file.write("<Fog fogType=\"%s\" " % self.namesFog[mtype])
+            self.file.write("color=\"%s %s %s\" " % round_color(world.horizon_color, self.cp))
+            # assumption: mist_settings is an RNA struct and is not index-able;
+            # its 'depth' property holds the mist distance used as the range
+            self.file.write("visibilityRange=\"%s\" />\n\n" % round(mparam.depth, self.cp))
+
+    def writeNavigationInfo(self, scene):
+        self.file.write('<NavigationInfo headlight="false" visibilityLimit="0.0" type=\'"EXAMINE","ANY"\' avatarSize="0.25, 1.75, 0.75" />\n')
+
+    def writeSpotLight(self, ob, mtx, lamp, world):
+        safeName = self.cleanStr(ob.name)
+        if world:
+            ambi = world.ambient_color
+            ambientIntensity = ((ambi[0] + ambi[1] + ambi[2]) / 3.0) / 2.5
+            del ambi
+        else:
+            ambientIntensity = 0.0
+
+        # compute cutoff and beamwidth
+        intensity = min(lamp.energy / 1.75, 1.0)
+        beamWidth = lamp.spot_size * 0.37
+        # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37
+        cutOffAngle = beamWidth * 1.3
+
+        dx, dy, dz = matrix_direction(mtx)
+
+        location = mtx.translation_part()
+
+        radius = lamp.distance * math.cos(beamWidth)
+        # radius = lamp.dist*math.cos(beamWidth)
+        self.file.write("<SpotLight DEF=\"%s\" " % safeName)
+        self.file.write("radius=\"%s\" " % (round(radius, self.cp)))
+        self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
+        self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
+        self.file.write("color=\"%s %s %s\" " % round_color(lamp.color, self.cp))
+        self.file.write("beamWidth=\"%s\" " % (round(beamWidth, self.cp)))
+        self.file.write("cutOffAngle=\"%s\" " % (round(cutOffAngle, self.cp)))
+        self.file.write("direction=\"%s %s %s\" " % (round(dx, 3), round(dy, 3), round(dz, 3)))
+        self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
+
+    def writeDirectionalLight(self, ob, mtx, lamp, world):
+        safeName = self.cleanStr(ob.name)
+        if world:
+            ambi = world.ambient_color
+            # ambi = world.amb
+            ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3.0) / 2.5
+        else:
+            ambi = 0
+            ambientIntensity = 0
+
+        intensity = min(lamp.energy / 1.75, 1.0)
+        dx, dy, dz = matrix_direction(mtx)
+        self.file.write("<DirectionalLight DEF=\"%s\" " % safeName)
+        self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
+        self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
+        self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
+        self.file.write("direction=\"%s %s %s\" />\n\n" % (round(dx, 4), round(dy, 4), round(dz, 4)))
+
+    def writePointLight(self, ob, mtx, lamp, world):
+        safeName = self.cleanStr(ob.name)
+        if world:
+            ambi = world.ambient_color
+            # ambi = world.amb
+            ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3) / 2.5
+        else:
+            ambi = 0
+            ambientIntensity = 0
+
+        location = mtx.translation_part()
+
+        self.file.write("<PointLight DEF=\"%s\" " % safeName)
+        self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
+        self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
+
+        self.file.write("intensity=\"%s\" " % (round(min(lamp.energy / 1.75, 1.0), self.cp)))
+        self.file.write("radius=\"%s\" " % lamp.distance)
+        self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
+
+    def secureName(self, name):
+        name = name + str(self.nodeID)
+        self.nodeID += 1
+        if len(name) <= 3:
+            return "_" + str(self.nodeID)
+        else:
+            for bad in ('"', '#', "'", ', ', '.', '[', '\\', ']', '{', '}'):
+                name = name.replace(bad, "_")
+            if name in self.namesReserved:
+                return name[0:3] + "_" + str(self.nodeID)
+            elif name[0].isdigit():
+                return "_" + name + str(self.nodeID)
+            else:
+                return name
+
+    def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI=False):
+        fw = self.file.write
+        mesh_name_x3d = self.cleanStr(ob.name)
+
+        if not mesh.faces:
+            return
+
+        mode = []
+        # mode = 0
+        if mesh.uv_textures.active:
+        # if mesh.faceUV:
+            for face in mesh.uv_textures.active.data:
+            # for face in mesh.faces:
+                if face.use_halo and 'HALO' not in mode:
+                    mode += ['HALO']
+                if face.use_billboard and 'BILLBOARD' not in mode:
+                    mode += ['BILLBOARD']
+                if face.use_object_color and 'OBJECT_COLOR' not in mode:
+                    mode += ['OBJECT_COLOR']
+                if face.use_collision and 'COLLISION' not in mode:
+                    mode += ['COLLISION']
+                # mode |= face.mode
+
+        if 'HALO' in mode and self.halonode == 0:
+        # if mode & Mesh.FaceModes.HALO and self.halonode == 0:
+            self.write_indented("<Billboard axisOfRotation=\"0 0 0\">\n", 1)
+            self.halonode = 1
+        elif 'BILLBOARD' in mode and self.billnode == 0:
+        # elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0:
+            self.write_indented("<Billboard axisOfRotation=\"0 1 0\">\n", 1)
+            self.billnode = 1
+        elif 'COLLISION' not in mode and self.collnode == 0:
+        # elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0:
+            self.write_indented("<Collision enabled=\"false\">\n", 1)
+            self.collnode = 1
+
+        loc, quat, sca = mtx.decompose()
+
+        self.write_indented("<Transform DEF=\"%s\" " % mesh_name_x3d, 1)
+        fw("translation=\"%.6f %.6f %.6f\" " % loc[:])
+        fw("scale=\"%.6f %.6f %.6f\" " % sca[:])
+        fw("rotation=\"%.6f %.6f %.6f %.6f\" " % (quat.axis[:] + (quat.angle, )))
+        fw(">\n")
+
+        if mesh.tag:
+            self.write_indented("<Group USE=\"G_%s\" />\n" % mesh_name_x3d, 1)
+        else:
+            mesh.tag = True
+
+            self.write_indented("<Group DEF=\"G_%s\">\n" % mesh_name_x3d, 1)
+
+            is_uv = bool(mesh.uv_textures.active)
+            # is_col, defined for each material
+
+            is_coords_written = False
+
+            mesh_materials = mesh.materials[:]
+            if not mesh_materials:
+                mesh_materials = [None]
+
+            mesh_material_tex = [None] * len(mesh_materials)
+            mesh_material_mtex = [None] * len(mesh_materials)
+            mesh_material_images = [None] * len(mesh_materials)
+
+            for i, material in enumerate(mesh_materials):
+                if material:
+                    for mtex in material.texture_slots:
+                        if mtex:
+                            tex = mtex.texture
+                            if tex and tex.type == 'IMAGE':
+                                image = tex.image
+                                if image:
+                                    mesh_material_tex[i] = tex
+                                    mesh_material_mtex[i] = mtex
+                                    mesh_material_images[i] = image
+                                    break
+
+            mesh_materials_use_face_texture = [getattr(material, "use_face_texture", True) for material in mesh_materials]
+
+            mesh_faces = mesh.faces[:]
+            mesh_faces_materials = [f.material_index for f in mesh_faces]
+
+            if is_uv and True in mesh_materials_use_face_texture:
+                mesh_faces_image = [(fuv.image
+                                     if (mesh_materials_use_face_texture[mesh_faces_materials[i]] and fuv.use_image)
+                                     else mesh_material_images[mesh_faces_materials[i]])
+                                    for i, fuv in enumerate(mesh.uv_textures.active.data)]
+                mesh_faces_image_unique = set(mesh_faces_image)
+            elif len(set(mesh_material_images) | {None}) > 1:  # at least one real image is used
+                mesh_faces_image = [mesh_material_images[material_index] for material_index in mesh_faces_materials]
+                mesh_faces_image_unique = set(mesh_faces_image)
+            else:
+                mesh_faces_image = [None] * len(mesh_faces)
+                mesh_faces_image_unique = {None}
+
+            # group faces
+            face_groups = {}
+            for material_index in range(len(mesh_materials)):
+                for image in mesh_faces_image_unique:
+                    face_groups[material_index, image] = []
+            del mesh_faces_image_unique
+
+            for i, (material_index, image) in enumerate(zip(mesh_faces_materials, mesh_faces_image)):
+                face_groups[material_index, image].append(i)
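+            # each non-empty (material, image) bucket becomes its own <Shape> below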
+
+            for (material_index, image), face_group in face_groups.items():
+                if face_group:
+                    material = mesh_materials[material_index]
+
+                    self.write_indented("<Shape>\n", 1)
+                    is_smooth = False
+                    is_col = (mesh.vertex_colors.active and (material is None or material.use_vertex_color_paint))
+
+                    # kludge but as good as it gets!
+                    for i in face_group:
+                        if mesh_faces[i].use_smooth:
+                            is_smooth = True
+                            break
+
+                    if image:
+                        self.write_indented("<Appearance>\n", 1)
+                        self.writeImageTexture(image)
+
+                        if mesh_materials_use_face_texture[material_index]:
+                            if image.use_tiles:
+                                self.write_indented("<TextureTransform scale=\"%s %s\" />\n" % (image.tiles_x, image.tiles_y))
+                        else:
+                            # transform by mtex
+                            loc = mesh_material_mtex[material_index].offset[:2]
+
+                            # mtex_scale * tex_repeat
+                            sca_x, sca_y = mesh_material_mtex[material_index].scale[:2]
+
+                            sca_x *= mesh_material_tex[material_index].repeat_x
+                            sca_y *= mesh_material_tex[material_index].repeat_y
+
+                            # flip x/y is a sampling feature, convert to transform
+                            if mesh_material_tex[material_index].use_flip_axis:
+                                rot = math.pi / -2.0
+                                sca_x, sca_y = sca_y, -sca_x
+                            else:
+                                rot = 0.0
+
+                            self.write_indented("<TextureTransform ", 1)
+                            # fw("center=\"%.6f %.6f\" " % (0.0, 0.0))
+                            fw("translation=\"%.6f %.6f\" " % loc)
+                            fw("scale=\"%.6f %.6f\" " % (sca_x, sca_y))
+                            fw("rotation=\"%.6f\" " % rot)
+                            fw("/>\n")
+
+                        self.write_indented("</Appearance>\n", -1)
+
+                    elif material:
+                        self.write_indented("<Appearance>\n", 1)
+                        self.writeMaterial(material, self.cleanStr(material.name, ""), world)
+                        self.write_indented("</Appearance>\n", -1)
+
+                    #-- IndexedFaceSet or IndexedLineSet
+
+                    self.write_indented("<IndexedFaceSet ", 1)
+
+                    # --- Write IndexedFaceSet Attributes
+                    if mesh.show_double_sided:
+                        fw("solid=\"true\" ")
+                    else:
+                        fw("solid=\"false\" ")
+
+                    if is_smooth:
+                        fw("creaseAngle=\"%.4f\" " % mesh.auto_smooth_angle)
+
+                    if is_uv:
+                        # "texCoordIndex"
+                        fw("\n\t\t\ttexCoordIndex=\"")
+                        j = 0
+                        for i in face_group:
+                            if len(mesh_faces[i].vertices) == 4:
+                                fw("%d %d %d %d -1, " % (j, j + 1, j + 2, j + 3))
+                                j += 4
+                            else:
+                                fw("%d %d %d -1, " % (j, j + 1, j + 2))
+                                j += 3
+                        fw("\" ")
+                        # --- end texCoordIndex
+
+                    if is_col:
+                        fw("colorPerVertex=\"false\" ")
+
+                    if True:
+                        # "coordIndex"
+                        fw('coordIndex="')
+                        if EXPORT_TRI:
+                            for i in face_group:
+                                fv = mesh_faces[i].vertices[:]
+                                if len(fv) == 3:
+                                    fw("%i %i %i -1, " % fv)
+                                else:
+                                    fw("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
+                                    fw("%i %i %i -1, " % (fv[0], fv[2], fv[3]))
+                        else:
+                            for i in face_group:
+                                fv = mesh_faces[i].vertices[:]
+                                if len(fv) == 3:
+                                    fw("%i %i %i -1, " % fv)
+                                else:
+                                    fw("%i %i %i %i -1, " % fv)
+
+                        fw("\" ")
+                        # --- end coordIndex
+
+                    # close IndexedFaceSet
+                    fw(">\n")
+
+                    # --- Write IndexedFaceSet Elements
+                    if True:
+                        if is_coords_written:
+                            self.write_indented("<Coordinate USE=\"%s%s\" />\n" % ("coord_", mesh_name_x3d))
+                        else:
+                            self.write_indented("<Coordinate DEF=\"%s%s\" \n" % ("coord_", mesh_name_x3d), 1)
+                            fw("\t\t\t\tpoint=\"")
+                            for v in mesh.vertices:
+                                fw("%.6f %.6f %.6f, " % v.co[:])
+                            fw("\" />")
+                            self.write_indented("\n", -1)
+                            is_coords_written = True
+
+                    if is_uv:
+                        self.write_indented("<TextureCoordinate point=\"", 1)
+                        mesh_faces_uv = mesh.uv_textures.active.data
+                        for i in face_group:
+                            for uv in mesh_faces_uv[i].uv:
+                                fw("%.4f %.4f, " % uv[:])
+                        del mesh_faces_uv
+                        fw("\" />")
+                        self.write_indented("\n", -1)
+
+                    if is_col:
+                        self.write_indented("<Color color=\"", 1)
+                        # XXX, 1 color per face, only
+                        mesh_faces_col = mesh.vertex_colors.active.data
+                        for i in face_group:
+                            fw("%.3f %.3f %.3f, " % mesh_faces_col[i].color1[:])
+                        del mesh_faces_col
+                        fw("\" />")
+                        self.write_indented("\n", -1)
+
+                    #--- output vertexColors
+
+                    #--- output closing braces
+                    self.write_indented("</IndexedFaceSet>\n", -1)
+                    self.write_indented("</Shape>\n", -1)
+
+            self.write_indented("</Group>\n", -1)
+
+        self.write_indented("</Transform>\n", -1)
+
+        if self.halonode == 1:
+            self.write_indented("</Billboard>\n", -1)
+            self.halonode = 0
+
+        if self.billnode == 1:
+            self.write_indented("</Billboard>\n", -1)
+            self.billnode = 0
+
+        if self.collnode == 1:
+            self.write_indented("</Collision>\n", -1)
+            self.collnode = 0
+
+        fw("\n")
+
+    def writeMaterial(self, mat, matName, world):
+        # look up material name, use it if available
+        if mat.tag:
+            self.write_indented("<Material USE=\"MA_%s\" />\n" % matName)
+        else:
+            mat.tag = True
+
+            emit = mat.emit
+            ambient = mat.ambient / 3.0
+            diffuseColor = tuple(mat.diffuse_color)
+            if world:
+                ambiColor = tuple(((c * mat.ambient) * 2.0) for c in world.ambient_color)
+            else:
+                ambiColor = 0.0, 0.0, 0.0
+
+            emitColor = tuple(((c * emit) + ambiColor[i]) / 2.0 for i, c in enumerate(diffuseColor))
+            shininess = mat.specular_hardness / 512.0
+            specColor = tuple((c + 0.001) / (1.25 / (mat.specular_intensity + 0.001)) for c in mat.specular_color)
+            transp = 1.0 - mat.alpha
+
+            if mat.use_shadeless:
+                ambient = 1.0
+                shininess = 0.0
+                specColor = emitColor = diffuseColor
+
+            self.write_indented("<Material DEF=\"MA_%s\" " % matName, 1)
+            self.file.write("diffuseColor=\"%s %s %s\" " % round_color(diffuseColor, self.cp))
+            self.file.write("specularColor=\"%s %s %s\" " % round_color(specColor, self.cp))
+            self.file.write("emissiveColor=\"%s %s %s\" \n" % round_color(emitColor, self.cp))
+            self.write_indented("ambientIntensity=\"%s\" " % (round(ambient, self.cp)))
+            self.file.write("shininess=\"%s\" " % (round(shininess, self.cp)))
+            self.file.write("transparency=\"%s\" />" % (round(transp, self.cp)))
+            self.write_indented("\n", -1)
+
+    def writeImageTexture(self, image):
+        name = image.name
+        filepath = os.path.basename(image.filepath)
+        if image.tag:
+            self.write_indented("<ImageTexture USE=\"%s\" />\n" % self.cleanStr(name))
+        else:
+            image.tag = True
+
+            self.write_indented("<ImageTexture DEF=\"%s\" " % self.cleanStr(name), 1)
+            self.file.write("url=\"%s\" />" % filepath)
+            self.write_indented("\n", -1)
+
+    def writeBackground(self, world, alltextures):
+        if world:
+            worldname = world.name
+        else:
+            return
+
+        blending = world.use_sky_blend, world.use_sky_paper, world.use_sky_real
+
+        grd_triple = round_color(world.horizon_color, self.cp)
+        sky_triple = round_color(world.zenith_color, self.cp)
+        mix_triple = round_color(((grd_triple[i] + sky_triple[i]) / 2.0 for i in range(3)), self.cp)
+
+        self.file.write("<Background DEF=\"%s\" " % self.secureName(worldname))
+        # No Skytype - just Hor color
+        if blending == (False, False, False):
+            self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
+            self.file.write("skyColor=\"%s %s %s\" " % grd_triple)
+        # Blend Gradient
+        elif blending == (True, False, False):
+            self.file.write("groundColor=\"%s %s %s, " % grd_triple)
+            self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
+            self.file.write("skyColor=\"%s %s %s, " % sky_triple)
+            self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
+        # Blend+Real Gradient Inverse
+        elif blending == (True, False, True):
+            self.file.write("groundColor=\"%s %s %s, " % sky_triple)
+            self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
+            self.file.write("skyColor=\"%s %s %s, " % grd_triple)
+            self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
+        # Paper - just Zen Color
+        elif blending == (False, False, True):
+            self.file.write("groundColor=\"%s %s %s\" " % sky_triple)
+            self.file.write("skyColor=\"%s %s %s\" " % sky_triple)
+        # Blend+Real+Paper - complex gradient
+        elif blending == (True, True, True):
+            self.file.write("groundColor=\"%s %s %s, " % sky_triple)
+            self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % grd_triple)
+            self.file.write("skyColor=\"%s %s %s, " % sky_triple)
+            self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % grd_triple)
+        # Any Other two colors
+        else:
+            self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
+            self.file.write("skyColor=\"%s %s %s\" " % sky_triple)
+
+        for tex in alltextures:
+            if tex.type != 'IMAGE' or tex.image is None:
+                continue
+
+            namemat = tex.name
+            pic = tex.image
+
+            # use bpy.path.abspath just in case, os.path may not expect '//'
+            basename = os.path.basename(bpy.path.abspath(pic.filepath))
+
+            if namemat == "back":
+                self.file.write("\n\tbackUrl=\"%s\" " % basename)
+            elif namemat == "bottom":
+                self.write_indented("bottomUrl=\"%s\" " % basename)
+            elif namemat == "front":
+                self.write_indented("frontUrl=\"%s\" " % basename)
+            elif namemat == "left":
+                self.write_indented("leftUrl=\"%s\" " % basename)
+            elif namemat == "right":
+                self.write_indented("rightUrl=\"%s\" " % basename)
+            elif namemat == "top":
+                self.write_indented("topUrl=\"%s\" " % basename)
+        self.write_indented("/>\n\n")
+
+##########################################################
+# export routine
+##########################################################
+
+    def export(self, scene, world, alltextures,
+                EXPORT_APPLY_MODIFIERS=False,
+                EXPORT_TRI=False,
+                ):
+
+        # tag un-exported IDs
+        bpy.data.meshes.tag(False)
+        bpy.data.materials.tag(False)
+        bpy.data.images.tag(False)
+
+        print("Info: starting X3D export to %r..." % self.filepath)
+        self.writeHeader()
+        # self.writeScript()
+        self.writeNavigationInfo(scene)
+        self.writeBackground(world, alltextures)
+        self.writeFog(world)
+        self.proto = 0
+
+        for ob_main in [o for o in scene.objects if o.is_visible(scene)]:
+
+            free, derived = create_derived_objects(scene, ob_main)
+
+            if derived is None:
+                continue
+
+            for ob, ob_mat in derived:
+                objType = ob.type
+                objName = ob.name
+                ob_mat = self.global_matrix * ob_mat
+
+                if objType == 'CAMERA':
+                    self.writeViewpoint(ob, ob_mat, scene)
+                elif objType in ('MESH', 'CURVE', 'SURF', 'FONT'):
+                    if EXPORT_APPLY_MODIFIERS or objType != 'MESH':
+                        me = ob.create_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')
+                    else:
+                        me = ob.data
+
+                    self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI=EXPORT_TRI)
+
+                    # free mesh created with create_mesh()
+                    if me != ob.data:
+                        bpy.data.meshes.remove(me)
+
+                elif objType == 'LAMP':
+                    data = ob.data
+                    datatype = data.type
+                    if datatype == 'POINT':
+                        self.writePointLight(ob, ob_mat, data, world)
+                    elif datatype == 'SPOT':
+                        self.writeSpotLight(ob, ob_mat, data, world)
+                    elif datatype == 'SUN':
+                        self.writeDirectionalLight(ob, ob_mat, data, world)
+                    else:
+                        self.writeDirectionalLight(ob, ob_mat, data, world)
+                else:
+                    #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
+                    pass
+
+            if free:
+                free_derived_objects(ob_main)
+
+        self.file.write("\n</Scene>\n</X3D>")
+
+        # if EXPORT_APPLY_MODIFIERS:
+        # 	if containerMesh:
+        # 		containerMesh.vertices = None
+
+        self.cleanup()
+
+##########################################################
+# Utility methods
+##########################################################
+
+    def cleanup(self):
+        self.file.close()
+        self.indentLevel = 0
+        print("Info: finished X3D export to %r" % self.filepath)
+
+    def cleanStr(self, name, prefix='rsvd_'):
+        """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name"""
+
+        newName = name
+        if len(newName) == 0:
+            self.nodeID += 1
+            return "%s%d" % (prefix, self.nodeID)
+
+        if newName in self.namesReserved:
+            newName = '%s%s' % (prefix, newName)
+
+        if newName[0].isdigit():
+            newName = "%s%s" % ('_', newName)
+
+        for bad in [' ', '"', '#', "'", ', ', '.', '[', '\\', ']', '{', '}']:
+            newName = newName.replace(bad, '_')
+        return newName
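+    # e.g. cleanStr("Cube.001") gives "Cube_001", while a reserved word such
+    # as "Box" becomes "rsvd_Box" so it cannot shadow an X3D node name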
+
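+    # NOTE: faceToString below still uses the 2.4x API ('Mesh', 'face.flag',
+    # 'face.sel'); it is an unused debug leftover kept for reference only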
+    def faceToString(self, face):
+
+        print("Debug: face.flag=0x%x (bitflags)" % face.flag)
+        if face.sel:
+            print("Debug: face.sel=true")
+
+        print("Debug: face.mode=0x%x (bitflags)" % face.mode)
+        if face.mode & Mesh.FaceModes.TWOSIDE:
+            print("Debug: face.mode twosided")
+
+        print("Debug: face.transp=0x%x (enum)" % face.blend_type)
+        if face.blend_type == Mesh.FaceTranspModes.SOLID:
+            print("Debug: face.transp.SOLID")
+
+        if face.image:
+            print("Debug: face.image=%s" % face.image.name)
+        print("Debug: face.materialIndex=%d" % face.materialIndex)
+
+    def meshToString(self, mesh):
+        # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors)
+        print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0))
+        # print("Debug: mesh.faceUV=%d" % mesh.faceUV)
+        print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0))
+        # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours())
+        print("Debug: mesh.vertices=%d" % len(mesh.vertices))
+        print("Debug: mesh.faces=%d" % len(mesh.faces))
+        print("Debug: mesh.materials=%d" % len(mesh.materials))
+
+        # s="%s %s %s" % (
+        # 	round(c.r/255.0,self.cp),
+        # 	round(c.g/255.0,self.cp),
+        # 	round(c.b/255.0,self.cp))
+        return s
+
+    # For writing well formed VRML code
+    #------------------------------------------------------------------------
+    def write_indented(self, s, inc=0):
+        if inc < 1:
+            self.indentLevel = self.indentLevel + inc
+
+        self.file.write((self.indentLevel * "\t") + s)
+
+        if inc > 0:
+            self.indentLevel = self.indentLevel + inc
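+    # e.g. write_indented("<Shape>\n", 1) writes at the current level and then
+    # indents, while write_indented("</Shape>\n", -1) dedents first, so the
+    # opening and closing tags line up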
+
+##########################################################
+# Callbacks, needed before Main
+##########################################################
+
+
+def save(operator, context, filepath="",
+          use_apply_modifiers=False,
+          use_triangulate=False,
+          use_compress=False):
+
+    if use_compress:
+        if not filepath.lower().endswith('.x3dz'):
+            filepath = '.'.join(filepath.split('.')[:-1]) + '.x3dz'
+    else:
+        if not filepath.lower().endswith('.x3d'):
+            filepath = '.'.join(filepath.split('.')[:-1]) + '.x3d'
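+    # e.g. a (hypothetical) "scene.x3d" becomes "scene.x3dz" when use_compress
+    # is enabled, and "scene.x3dz" becomes "scene.x3d" when it is not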
+
+    scene = context.scene
+    world = scene.world
+
+    if bpy.ops.object.mode_set.poll():
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+    # XXX these are global textures while .Get() returned only scene's?
+    alltextures = bpy.data.textures
+    # alltextures = Blender.Texture.Get()
+
+    wrlexport = x3d_class(filepath)
+    wrlexport.export(scene,
+                     world,
+                     alltextures,
+                     EXPORT_APPLY_MODIFIERS=use_apply_modifiers,
+                     EXPORT_TRI=use_triangulate,
+                     )
+
+    return {'FINISHED'}
diff --git a/io_scene_x3d/import_x3d.py b/io_scene_x3d/import_x3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..f28859438661164dcbaf5fefa20cb09c6ad6ce2a
--- /dev/null
+++ b/io_scene_x3d/import_x3d.py
@@ -0,0 +1,2658 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+DEBUG = False
+
+# This should work without Blender at all
+from os.path import exists
+
+
+def baseName(path):
+    return path.split('/')[-1].split('\\')[-1]
+
+
+def dirName(path):
+    return path[:-len(baseName(path))]
+
+
+def imageConvertCompat(path):
+
+    try:
+        import os
+    except:
+        return path
+    if os.sep == '\\':
+        return path  # assume win32 has QuickTime, don't convert
+
+    if path.lower().endswith('.gif'):
+        path_to = path[:-3] + 'png'
+
+        '''
+        if exists(path_to):
+            return path_to
+        '''
+        # print('\n'+path+'\n'+path_to+'\n')
+        os.system('convert "%s" "%s"' % (path, path_to))  # for now just hope we have image magick
+
+        if exists(path_to):
+            return path_to
+
+    return path
+
+# notes
+# transforms are relative
+# order doesn't matter for loc/size/rot
+# right handed rotation
+# angles are in radians
+# rotation first defines the axis then the amount in radians
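+# e.g. a (hypothetical) "rotation 0 0 1 1.5708" is a quarter turn about +Z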
+
+
+# =============================== VRML Specific
+
+def vrmlFormat(data):
+    '''
+    Keep this as a valid vrml file, but format in a way we can predict.
+    '''
+    # Strip all comments - # not in strings - warning: multiline strings are ignored.
+    def strip_comment(l):
+        #l = ' '.join(l.split())
+        l = l.strip()
+
+        if l.startswith('#'):
+            return ''
+
+        i = l.find('#')
+
+        if i == -1:
+            return l
+
+        # Most cases accounted for! if we have a comment at the end of the line do this...
+        #j = l.find('url "')
+        j = l.find('"')
+
+        if j == -1:  # simple no strings
+            return l[:i].strip()
+
+        q = False
+        for i, c in enumerate(l):
+            if c == '"':
+                q = not q  # invert
+
+            elif c == '#':
+                if not q:
+                    return l[:i - 1]
+
+        return l
+
+    data = '\n'.join([strip_comment(l) for l in data.split('\n')])  # strip comments line by line
+
+    EXTRACT_STRINGS = True  # only needed when strings or filenames contain ,[]{} chars
+
+    if EXTRACT_STRINGS:
+
+        # We need this so we can detect URL's
+        data = '\n'.join([' '.join(l.split()) for l in data.split('\n')])  # normalize whitespace
+
+        string_ls = []
+
+        #search = 'url "'
+        search = '"'
+
+        ok = True
+        last_i = 0
+        while ok:
+            ok = False
+            i = data.find(search, last_i)
+            if i != -1:
+
+                start = i + len(search)  # first char after end of search
+                end = data.find('"', start)
+                if end != -1:
+                    item = data[start:end]
+                    string_ls.append(item)
+                    data = data[:start] + data[end:]
+                    ok = True  # keep looking
+
+                    last_i = (end - len(item)) + 1
+                    # print(last_i, item, '|' + data[last_i] + '|')
+
+    # done with messy extracting strings part
+
+    # Bad, doesn't take strings into account
+    '''
+    data = data.replace('#', '\n#')
+    data = '\n'.join([ll for l in data.split('\n') for ll in (l.strip(),) if not ll.startswith('#')]) # remove all whitespace
+    '''
+    data = data.replace('{', '\n{\n')
+    data = data.replace('}', '\n}\n')
+    data = data.replace('[', '\n[\n')
+    data = data.replace(']', '\n]\n')
+    data = data.replace(',', ' , ')  # make sure commas separate
+
+    if EXTRACT_STRINGS:
+        # add strings back in
+
+        search = '"'  # fill in these empty strings
+
+        ok = True
+        last_i = 0
+        while ok:
+            ok = False
+            i = data.find(search + '"', last_i)
+            # print(i)
+            if i != -1:
+                start = i + len(search)  # first char after end of search
+                item = string_ls.pop(0)
+                # print(item)
+                data = data[:start] + item + data[start:]
+
+                last_i = start + len(item) + 1
+
+                ok = True
+
+    # More annoying obscure cases where USE or DEF are placed on a newline
+    # data = data.replace('\nDEF ', ' DEF ')
+    # data = data.replace('\nUSE ', ' USE ')
+
+    data = '\n'.join([' '.join(l.split()) for l in data.split('\n')])  # normalize whitespace
+
+    # Better to parse the file accounting for multiline arrays
+    '''
+    data = data.replace(',\n', ' , ') # remove line endings with commas
+    data = data.replace(']', '\n]\n') # very very annoying - but some commas are at the end of the list, must run this again.
+    '''
+
+    return [l for l in data.split('\n') if l]
+
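+# e.g. vrmlFormat('Shape { geometry Box { size 1 1 1 } }') returns the token
+# lines: ['Shape', '{', 'geometry Box', '{', 'size 1 1 1', '}', '}']
+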
+NODE_NORMAL = 1  # {}
+NODE_ARRAY = 2  # []
+NODE_REFERENCE = 3  # USE foobar
+# NODE_PROTO = 4 #
+
+lines = []
+
+
+def getNodePreText(i, words):
+    # print(lines[i])
+    use_node = False
+    while len(words) < 5:
+
+        if i >= len(lines):
+            break
+        # elif lines[i].startswith('PROTO'):
+        #     return NODE_PROTO, i + 1
+        elif lines[i] == '{':
+            # words.append(lines[i]) # no need
+            # print("OK")
+            return NODE_NORMAL, i + 1
+        elif lines[i].count('"') % 2 != 0:  # odd number of quotes? - part of a string.
+            # print('ISSTRING')
+            break
+        else:
+            new_words = lines[i].split()
+            if 'USE' in new_words:
+                use_node = True
+
+            words.extend(new_words)
+            i += 1
+
+        # Check for USE node - no {
+        # USE #id - should always be on the same line.
+        if use_node:
+            # print('LINE', i, words[:words.index('USE')+2])
+            words[:] = words[:words.index('USE') + 2]
+            if lines[i] == '{' and lines[i + 1] == '}':
+                # USE sometimes has {} after it anyway
+                i += 2
+            return NODE_REFERENCE, i
+
+    # print("error value!!!", words)
+    return 0, -1
+
+
+def is_nodeline(i, words):
+
+    if not lines[i][0].isalpha():
+        return 0, 0
+
+    #if lines[i].startswith('field'):
+    #   return 0, 0
+
+    # Is this a prototype??
+    if lines[i].startswith('PROTO'):
+        words[:] = lines[i].split()
+        return NODE_NORMAL, i + 1  # TODO - assumes the next line is a '[\n', skip that
+    if lines[i].startswith('EXTERNPROTO'):
+        words[:] = lines[i].split()
+        return NODE_ARRAY, i + 1  # TODO - assumes the next line is a '[\n', skip that
+
+    '''
+    proto_type, new_i = is_protoline(i, words, proto_field_defs)
+    if new_i != -1:
+        return proto_type, new_i
+    '''
+
+    # Simple "var [" type
+    if lines[i + 1] == '[':
+        if lines[i].count('"') % 2 == 0:
+            words[:] = lines[i].split()
+            return NODE_ARRAY, i + 2
+
+    node_type, new_i = getNodePreText(i, words)
+
+    if not node_type:
+        if DEBUG:
+            print("not node_type", lines[i])
+        return 0, 0
+
+    # Ok, we have a { after some values
+    # Check the values are not fields
+    for wi, val in enumerate(words):
+        if wi != 0 and words[wi - 1] in ('DEF', 'USE'):
+            # ignore anything after DEF, it is an ID and can contain any chars.
+            pass
+        elif val[0].isalpha() and val not in ('TRUE', 'FALSE'):
+            pass
+        else:
+            # There is a number in one of the values, therefore we are not a node.
+            return 0, 0
+
+    #if node_type==NODE_REFERENCE:
+    #   print(words, "REF_!!!!!!!")
+    return node_type, new_i
+
+
+def is_numline(i):
+    '''
+    Does this line start with a number?
+    '''
+
+    # Works but too slow.
+    '''
+    l = lines[i]
+    for w in l.split():
+        if w==',':
+            pass
+        else:
+            try:
+                float(w)
+                return True
+
+            except:
+                return False
+
+    return False
+    '''
+
+    l = lines[i]
+
+    line_start = 0
+
+    if l.startswith(', '):
+        line_start += 2
+
+    line_end = len(l) - 1
+    line_end_new = l.find(' ', line_start)  # commas always have a space before them
+
+    if line_end_new != -1:
+        line_end = line_end_new
+
+    try:
+        float(l[line_start:line_end])  # works for a float or int
+        return True
+    except:
+        return False
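+
+# For example, '0.5 1.0 2.0 ,' and ', 4 5 6' both pass the float() test above
+# and count as data rows, while 'translation 1 0 0' fails on its first word
+# and is treated as a field instead.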
+
+
+class vrmlNode(object):
+    __slots__ = ('id',
+                 'fields',
+                 'proto_node',
+                 'proto_field_defs',
+                 'proto_fields',
+                 'node_type',
+                 'parent',
+                 'children',
+                 'array_data',
+                 'reference',
+                 'lineno',
+                 'filename',
+                 'blendObject',
+                 'DEF_NAMESPACE',
+                 'ROUTE_IPO_NAMESPACE',
+                 'PROTO_NAMESPACE',
+                 'x3dNode')
+
+    def __init__(self, parent, node_type, lineno):
+        self.id = None
+        self.node_type = node_type
+        self.parent = parent
+        self.blendObject = None
+        self.x3dNode = None  # for x3d import only
+        if parent:
+            parent.children.append(self)
+
+        self.lineno = lineno
+
+        # This is only set from the root nodes.
+        # Having a filename also denotes a root node
+        self.filename = None
+        self.proto_node = None  # proto field definition eg: "field SFColor seatColor .6 .6 .1"
+
+        # Store in the root node because each inline file needs its own root node and its own namespace
+        self.DEF_NAMESPACE = None
+        self.ROUTE_IPO_NAMESPACE = None
+        '''
+        self.FIELD_NAMESPACE = None
+        '''
+
+        self.PROTO_NAMESPACE = None
+
+        self.reference = None
+
+        if node_type == NODE_REFERENCE:
+            # For references, only the parent and ID are needed
+            # the reference itself is assigned during parsing
+            return
+
+        self.fields = []  # fields have no order; in some cases root level values are not unique, so don't use a dict
+
+        self.proto_field_defs = []  # proto field definition eg: "field SFColor seatColor .6 .6 .1"
+        self.proto_fields = []  # proto field usage "diffuseColor IS seatColor"
+        self.children = []
+        self.array_data = []  # use for arrays of data - should only be for NODE_ARRAY types
+
+    # Only available from the root node
+    '''
+    def getFieldDict(self):
+        if self.FIELD_NAMESPACE != None:
+            return self.FIELD_NAMESPACE
+        else:
+            return self.parent.getFieldDict()
+    '''
+    def getProtoDict(self):
+        if self.PROTO_NAMESPACE != None:
+            return self.PROTO_NAMESPACE
+        else:
+            return self.parent.getProtoDict()
+
+    def getDefDict(self):
+        if self.DEF_NAMESPACE != None:
+            return self.DEF_NAMESPACE
+        else:
+            return self.parent.getDefDict()
+
+    def getRouteIpoDict(self):
+        if self.ROUTE_IPO_NAMESPACE != None:
+            return self.ROUTE_IPO_NAMESPACE
+        else:
+            return self.parent.getRouteIpoDict()
+
+    def setRoot(self, filename):
+        self.filename = filename
+        # self.FIELD_NAMESPACE =        {}
+        self.DEF_NAMESPACE = {}
+        self.ROUTE_IPO_NAMESPACE = {}
+        self.PROTO_NAMESPACE = {}
+
+    def isRoot(self):
+        if self.filename == None:
+            return False
+        else:
+            return True
+
+    def getFilename(self):
+        if self.filename:
+            return self.filename
+        elif self.parent:
+            return self.parent.getFilename()
+        else:
+            return None
+
+    def getRealNode(self):
+        if self.reference:
+            return self.reference
+        else:
+            return self
+
+    def getSpec(self):
+        self_real = self.getRealNode()
+        try:
+            return self_real.id[-1]  # it's possible this node has no spec
+        except:
+            return None
+
+    def findSpecRecursive(self, spec):
+        self_real = self.getRealNode()
+        if spec == self_real.getSpec():
+            return self
+
+        for child in self_real.children:
+            found = child.findSpecRecursive(spec)
+            if found:
+                return found  # return the matching descendant, not just the child subtree root
+
+        return None
+
+    def getPrefix(self):
+        if self.id:
+            return self.id[0]
+        return None
+
+    def getSpecialTypeName(self, typename):
+        self_real = self.getRealNode()
+        try:
+            return self_real.id[list(self_real.id).index(typename) + 1]
+        except:
+            return None
+
+    def getDefName(self):
+        return self.getSpecialTypeName('DEF')
+
+    def getProtoName(self):
+        return self.getSpecialTypeName('PROTO')
+
+    def getExternprotoName(self):
+        return self.getSpecialTypeName('EXTERNPROTO')
+
+    def getChildrenBySpec(self, node_spec):  # spec could be Transform, Shape, Appearance
+        self_real = self.getRealNode()
+        # using getSpec functions allows us to use the spec of USE children that don't have their spec in their ID
+        if type(node_spec) == str:
+            return [child for child in self_real.children if child.getSpec() == node_spec]
+        else:
+            # Check inside a list of optional types
+            return [child for child in self_real.children if child.getSpec() in node_spec]
+
+    def getChildBySpec(self, node_spec):  # spec could be Transform, Shape, Appearance
+        # Use in cases where there is only ever 1 child of this type
+        ls = self.getChildrenBySpec(node_spec)
+        if ls:
+            return ls[0]
+        else:
+            return None
+
+    def getChildrenByName(self, node_name):  # type could be geometry, children, appearance
+        self_real = self.getRealNode()
+        return [child for child in self_real.children if child.id and child.id[0] == node_name]
+
+    def getChildByName(self, node_name):
+        self_real = self.getRealNode()
+        for child in self_real.children:
+            if child.id and child.id[0] == node_name:  # and child.id[-1]==node_spec:
+                return child
+
+    def getSerialized(self, results, ancestry):
+        ''' Return this node and all its children in a flat list '''
+        ancestry = ancestry[:]  # always use a copy
+
+        # self_real = self.getRealNode()
+
+        results.append((self, tuple(ancestry)))
+        ancestry.append(self)
+        for child in self.getRealNode().children:
+            if child not in ancestry:
+                # We don't want to load PROTOs, they are only references
+                # We could enforce this elsewhere
+
+                # Only add this in a very special case
+                # where the parent of this object is not the real parent
+                # - In this case we have added the proto as a child to a node instancing it.
+                # This is a bit arbitrary, but it's how PROTOs are handled by this importer.
+                if child.getProtoName() == None and child.getExternprotoName() == None:
+                    child.getSerialized(results, ancestry)
+                else:
+
+                    if DEBUG:
+                        print('getSerialized() is proto:', child.getProtoName(), child.getExternprotoName(), self.getSpec())
+
+                    self_spec = self.getSpec()
+
+                    if child.getProtoName() == self_spec or child.getExternprotoName() == self_spec:
+                        #if DEBUG:
+                        #    "FoundProto!"
+                        child.getSerialized(results, ancestry)
+
+        return results
+
+    def searchNodeTypeID(self, node_spec, results):
+        self_real = self.getRealNode()
+        # print(self.lineno, self.id)
+        if self_real.id and self_real.id[-1] == node_spec:  # use last element, could also be only element
+            results.append(self_real)
+        for child in self_real.children:
+            child.searchNodeTypeID(node_spec, results)
+        return results
+
+    def getFieldName(self, field, ancestry, AS_CHILD=False):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        for f in self_real.fields:
+            # print(f)
+            if f and f[0] == field:
+                # print('\tfound field', f)
+
+                if len(f) >= 3 and f[1] == 'IS':  # eg: 'diffuseColor IS legColor'
+                    field_id = f[2]
+
+                    # print("\n\n\n\n\n\nFOND IS!!!")
+                    f_proto_lookup = None
+                    f_proto_child_lookup = None
+                    i = len(ancestry)
+                    while i:
+                        i -= 1
+                        node = ancestry[i]
+                        node = node.getRealNode()
+
+                        # proto settings are stored in "self.proto_node"
+                        if node.proto_node:
+                            # Get the default value from the proto; this can be overridden by the proto instance
+                            # 'field SFColor legColor .8 .4 .7'
+                            if AS_CHILD:
+                                for child in node.proto_node.children:
+                                    #if child.id  and  len(child.id) >= 3  and child.id[2]==field_id:
+                                    if child.id and ('point' in child.id or 'points' in child.id):
+                                        f_proto_child_lookup = child
+
+                            else:
+                                for f_def in node.proto_node.proto_field_defs:
+                                    if len(f_def) >= 4:
+                                        if f_def[0] == 'field' and f_def[2] == field_id:
+                                            f_proto_lookup = f_def[3:]
+
+                        # Node instance: will be one level up from the proto node in the ancestry list, but NOT its parent.
+                        # This is the setting as defined by the instance; including this setting is optional,
+                        # and it will override the default PROTO value
+                        # eg: 'legColor 1 0 0'
+                        if AS_CHILD:
+                            for child in node.children:
+                                if child.id and child.id[0] == field_id:
+                                    f_proto_child_lookup = child
+                        else:
+                            for f_def in node.fields:
+                                if len(f_def) >= 2:
+                                    if f_def[0] == field_id:
+                                        if DEBUG:
+                                            print("getFieldName(), found proto", f_def)
+                                        f_proto_lookup = f_def[1:]
+
+                    if AS_CHILD:
+                        if f_proto_child_lookup:
+                            if DEBUG:
+                                print("getFieldName() - AS_CHILD=True, child found")
+                                print(f_proto_child_lookup)
+                        return f_proto_child_lookup
+                    else:
+                        return f_proto_lookup
+                else:
+                    if AS_CHILD:
+                        return None
+                    else:
+                        # Not using a proto
+                        return f[1:]
+        # print('\tfield not found', field)
+
+        # See if this is a proto name
+        if AS_CHILD:
+            for child in self_real.children:
+                if child.id and len(child.id) == 1 and child.id[0] == field:
+                    return child
+
+        return None
+
+    def getFieldAsInt(self, field, default, ancestry):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        f = self_real.getFieldName(field, ancestry)
+        if f == None:
+            return default
+        if ',' in f:
+            f = f[:f.index(',')]  # strip after the comma
+
+        if len(f) != 1:
+            print('\t"%s" wrong length for int conversion for field "%s"' % (f, field))
+            return default
+
+        try:
+            return int(f[0])
+        except:
+            print('\tvalue "%s" could not be used as an int for field "%s"' % (f[0], field))
+            return default
+
+    def getFieldAsFloat(self, field, default, ancestry):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        f = self_real.getFieldName(field, ancestry)
+        if f == None:
+            return default
+        if ',' in f:
+            f = f[:f.index(',')]  # strip after the comma
+
+        if len(f) != 1:
+            print('\t"%s" wrong length for float conversion for field "%s"' % (f, field))
+            return default
+
+        try:
+            return float(f[0])
+        except:
+            print('\tvalue "%s" could not be used as a float for field "%s"' % (f[0], field))
+            return default
+
+    def getFieldAsFloatTuple(self, field, default, ancestry):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        f = self_real.getFieldName(field, ancestry)
+        if f == None:
+            return default
+        # if ',' in f: f = f[:f.index(',')] # strip after the comma
+
+        if len(f) < 1:
+            print('"%s" wrong length for float tuple conversion for field "%s"' % (f, field))
+            return default
+
+        ret = []
+        for v in f:
+            if v != ',':
+                try:
+                    ret.append(float(v))
+                except:
+                    break  # quit on first non-float; perhaps it's a new field name on the same line? - if so we are going to ignore it :/ TODO
+        # print(ret)
+
+        if ret:
+            return ret
+
+        print('\tvalue "%s" could not be used as a float tuple for field "%s"' % (f, field))
+        return default
+
+    def getFieldAsBool(self, field, default, ancestry):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        f = self_real.getFieldName(field, ancestry)
+        if f == None:
+            return default
+        if ',' in f:
+            f = f[:f.index(',')]  # strip after the comma
+
+        if len(f) != 1:
+            print('\t"%s" wrong length for bool conversion for field "%s"' % (f, field))
+            return default
+
+        if f[0].upper() == '"TRUE"' or f[0].upper() == 'TRUE':
+            return True
+        elif f[0].upper() == '"FALSE"' or f[0].upper() == 'FALSE':
+            return False
+        else:
+            print('\t"%s" could not be used as a bool for field "%s"' % (f[1], field))
+            return default
+
+    def getFieldAsString(self, field, default, ancestry):
+        self_real = self.getRealNode()  # in case we're an instance
+
+        f = self_real.getFieldName(field, ancestry)
+        if f == None:
+            return default
+        if len(f) < 1:
+            print('\t"%s" wrong length for string conversion for field "%s"' % (f, field))
+            return default
+
+        if len(f) > 1:
+            # String may contain spaces
+            st = ' '.join(f)
+        else:
+            st = f[0]
+
+        # X3D HACK
+        if self.x3dNode:
+            return st
+
+        if st[0] == '"' and st[-1] == '"':
+            return st[1:-1]
+        else:
+            print('\tvalue "%s" could not be used as a string for field "%s"' % (f[0], field))
+            return default
+
+    def getFieldAsArray(self, field, group, ancestry):
+        '''
+        For this parser arrays are children
+        '''
+
+        def array_as_number(array_string):
+            array_data = []
+            try:
+                array_data = [int(val) for val in array_string]
+            except:
+                try:
+                    array_data = [float(val) for val in array_string]
+                except:
+                    print('\tWarning, could not parse array data from field')
+
+            return array_data
+
+        self_real = self.getRealNode()  # in case we're an instance
+
+        child_array = self_real.getFieldName(field, ancestry, True)
+
+        #if type(child_array)==list: # happens occasionally
+        #   array_data = child_array
+
+        if child_array is None:
+            # For x3d, should work ok with vrml too
+            # in x3d arrays are fields, in vrml they are nodes; annoying but not too bad.
+            data_split = self.getFieldName(field, ancestry)
+            if not data_split:
+                return []
+            array_data = ' '.join(data_split)
+            if array_data == None:
+                return []
+
+            array_data = array_data.replace(',', ' ')
+            data_split = array_data.split()
+
+            array_data = array_as_number(data_split)
+
+        elif type(child_array) == list:
+            # x3d creates these
+            data_split = [w.strip(",") for w in child_array]
+
+            array_data = array_as_number(data_split)
+        else:
+            # print(child_array)
+            # Normal vrml
+            array_data = child_array.array_data
+
+        # print('array_data', array_data)
+        if group == -1 or len(array_data) == 0:
+            return array_data
+
+        # We want a flat list
+        flat = True
+        for item in array_data:
+            if type(item) == list:
+                flat = False
+                break
+
+        # make a flat array
+        if flat:
+            flat_array = array_data  # we are already flat.
+        else:
+            flat_array = []
+
+            def extend_flat(ls):
+                for item in ls:
+                    if type(item) == list:
+                        extend_flat(item)
+                    else:
+                        flat_array.append(item)
+
+            extend_flat(array_data)
+
+        # We requested a flat array
+        if group == 0:
+            return flat_array
+
+        new_array = []
+        sub_array = []
+
+        for item in flat_array:
+            sub_array.append(item)
+            if len(sub_array) == group:
+                new_array.append(sub_array)
+                sub_array = []
+
+        if sub_array:
+            print('\twarning, array was not aligned to requested grouping', group, 'remaining value', sub_array)
+
+        return new_array
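+
+    # Grouping sketch: getFieldAsArray('point', 3, ancestry) turns the flat
+    # numbers [0.0, 0.0, 0.0, 1.0, 0.0, 0.0] into
+    # [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]; group=0 returns the flat list and
+    # group=-1 returns the parsed data unmodified.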
+
+    def getFieldAsStringArray(self, field, ancestry):
+        '''
+        Get a list of strings
+        '''
+        self_real = self.getRealNode()  # in case we're an instance
+
+        child_array = None
+        for child in self_real.children:
+            if child.id and len(child.id) == 1 and child.id[0] == field:
+                child_array = child
+                break
+        if not child_array:
+            return []
+
+        # each string gets its own list, remove ""'s
+        try:
+            new_array = [f[0][1:-1] for f in child_array.fields]
+        except:
+            print('\twarning, string array could not be made')
+            new_array = []
+
+        return new_array
+
+    def getLevel(self):
+        # Ignore self_real
+        level = 0
+        p = self.parent
+        while p:
+            level += 1
+            p = p.parent
+
+        return level
+
+    def __repr__(self):
+        level = self.getLevel()
+        ind = '  ' * level
+        if self.node_type == NODE_REFERENCE:
+            brackets = ''
+        elif self.node_type == NODE_NORMAL:
+            brackets = '{}'
+        else:
+            brackets = '[]'
+
+        if brackets:
+            text = ind + brackets[0] + '\n'
+        else:
+            text = ''
+
+        text += ind + 'ID: ' + str(self.id) + ' ' + str(level) + (' lineno %d\n' % self.lineno)
+
+        if self.node_type == NODE_REFERENCE:
+            text += ind + "(reference node)\n"
+            return text
+
+        if self.proto_node:
+            text += ind + 'PROTO NODE...\n'
+            text += str(self.proto_node)
+            text += ind + 'PROTO NODE_DONE\n'
+
+        text += ind + 'FIELDS:' + str(len(self.fields)) + '\n'
+
+        for i, item in enumerate(self.fields):
+            text += ind + 'FIELD:\n'
+            text += ind + str(item) + '\n'
+
+        text += ind + 'PROTO_FIELD_DEFS:' + str(len(self.proto_field_defs)) + '\n'
+
+        for i, item in enumerate(self.proto_field_defs):
+            text += ind + 'PROTO_FIELD:\n'
+            text += ind + str(item) + '\n'
+
+        text += ind + 'ARRAY: ' + str(len(self.array_data)) + ' ' + str(self.array_data) + '\n'
+        #text += ind + 'ARRAY: ' + str(len(self.array_data)) + '[...] \n'
+
+        text += ind + 'CHILDREN: ' + str(len(self.children)) + '\n'
+        for i, child in enumerate(self.children):
+            text += ind + ('CHILD%d:\n' % i)
+            text += str(child)
+
+        text += '\n' + ind + brackets[1]
+
+        return text
+
+    def parse(self, i, IS_PROTO_DATA=False):
+        new_i = self.__parse(i, IS_PROTO_DATA)
+
+        # print(self.id, self.getFilename())
+
+        # Check if this node was an inline or externproto
+
+        url_ls = []
+
+        if self.node_type == NODE_NORMAL and self.getSpec() == 'Inline':
+            ancestry = []  # Warning! - PROTOs using this won't work at all.
+            url = self.getFieldAsString('url', None, ancestry)
+            if url:
+                url_ls = [(url, None)]
+            del ancestry
+
+        elif self.getExternprotoName():
+            # externproto
+            url_ls = []
+            for f in self.fields:
+
+                if type(f) == str:
+                    f = [f]
+
+                for ff in f:
+                    for f_split in ff.split('"'):
+                        # print(f_split)
+                        # "someextern.vrml#SomeID"
+                        if '#' in f_split:
+
+                            f_split, f_split_id = f_split.split('#')  # there should only be 1 # anyway
+
+                            url_ls.append((f_split, f_split_id))
+                        else:
+                            url_ls.append((f_split, None))
+
+        # Was either an Inline or an EXTERNPROTO
+        if url_ls:
+
+            # print(url_ls)
+
+            for url, extern_key in url_ls:
+                print(url)
+                urls = []
+                urls.append(url)
+                urls.append(bpy.path.resolve_ncase(urls[-1]))
+
+                urls.append(dirName(self.getFilename()) + url)
+                urls.append(bpy.path.resolve_ncase(urls[-1]))
+
+                urls.append(dirName(self.getFilename()) + baseName(url))
+                urls.append(bpy.path.resolve_ncase(urls[-1]))
+
+                try:
+                    url = [url for url in urls if exists(url)][0]
+                    url_found = True
+                except:
+                    url_found = False
+
+                if not url_found:
+                    print('\tWarning: Inline URL could not be found:', url)
+                else:
+                    if url == self.getFilename():
+                        print("\tWarning: can't Inline yourself recursively:", url)
+                    else:
+
+                        try:
+                            data = gzipOpen(url)
+                        except:
+                            print("\tWarning: can't open the file:", url)
+                            data = None
+
+                        if data:
+                            # Tricky - inline another VRML
+                            print('\tLoading Inline:"%s"...' % url)
+
+                            # Watch it! - backup lines
+                            lines_old = lines[:]
+
+                            lines[:] = vrmlFormat(data)
+
+                            lines.insert(0, '{')
+                            lines.insert(0, 'root_node____')
+                            lines.append('}')
+                            '''
+                            ff = open('/tmp/test.txt', 'w')
+                            ff.writelines([l+'\n' for l in lines])
+                            '''
+
+                            child = vrmlNode(self, NODE_NORMAL, -1)
+                            child.setRoot(url)  # initializes dicts
+                            child.parse(0)
+
+                            if self.getExternprotoName():
+                                if not extern_key:  # if none is specified - use the name
+                                    extern_key = self.getSpec()
+
+                                if extern_key:
+
+                                    self.children.remove(child)
+                                    child.parent = None
+
+                                    extern_child = child.findSpecRecursive(extern_key)
+
+                                    if extern_child:
+                                        self.children.append(extern_child)
+                                        extern_child.parent = self
+
+                                        if DEBUG:
+                                            print("\tEXTERNPROTO ID found!:", extern_key)
+                                    else:
+                                        print("\tEXTERNPROTO ID not found!:", extern_key)
+
+                            # Watch it! - restore lines
+                            lines[:] = lines_old
+
+        return new_i
+
+    def __parse(self, i, IS_PROTO_DATA=False):
+        '''
+        print('parsing at', i, end="")
+        print(i, self.id, self.lineno)
+        '''
+        l = lines[i]
+
+        if l == '[':
+            # An anonymous list
+            self.id = None
+            i += 1
+        else:
+            words = []
+
+            node_type, new_i = is_nodeline(i, words)
+            if not node_type:  # fail for parsing new node.
+                print("Failed to parse new node")
+                raise ValueError
+
+            if self.node_type == NODE_REFERENCE:
+                # Only assign the reference and quit
+                key = words[words.index('USE') + 1]
+                self.id = (words[0],)
+
+                self.reference = self.getDefDict()[key]
+                return new_i
+
+            self.id = tuple(words)
+
+            # fill in DEF/USE
+            key = self.getDefName()
+            if key != None:
+                self.getDefDict()[key] = self
+
+            key = self.getProtoName()
+            if not key:
+                key = self.getExternprotoName()
+
+            proto_dict = self.getProtoDict()
+            if key != None:
+                proto_dict[key] = self
+
+                # Parse the proto nodes fields
+                self.proto_node = vrmlNode(self, NODE_ARRAY, new_i)
+                new_i = self.proto_node.parse(new_i)
+
+                self.children.remove(self.proto_node)
+
+                # print(self.proto_node)
+
+                new_i += 1  # skip past the {
+
+            else:  # If we're a proto instance, add the proto node as our child.
+                spec = self.getSpec()
+                try:
+                    self.children.append(proto_dict[spec])
+                    #pass
+                except:
+                    pass
+
+                del spec
+
+            del proto_dict, key
+
+            i = new_i
+
+        # print(self.id)
+        ok = True
+        while ok:
+            if i >= len(lines):
+                return len(lines) - 1
+
+            l = lines[i]
+            # print('\tDEBUG:', i, self.node_type, l)
+            if l == '':
+                i += 1
+                continue
+
+            if l == '}':
+                if self.node_type != NODE_NORMAL:  # also ends proto nodes, we may want a type for these too.
+                    print('wrong node ending, unexpected } ' + str(i) + ' ' + str(self.node_type))
+                    if DEBUG:
+                        raise ValueError
+                ### print("returning", i)
+                return i + 1
+            if l == ']':
+                if self.node_type != NODE_ARRAY:
+                    print('wrong node ending, unexpected ] ' + str(i) + ' ' + str(self.node_type))
+                    if DEBUG:
+                        raise ValueError
+                ### print("returning", i)
+                return i + 1
+
+            node_type, new_i = is_nodeline(i, [])
+            if node_type:  # check text\n{
+                child = vrmlNode(self, node_type, i)
+                i = child.parse(i)
+
+            elif l == '[':  # some files have these anonymous lists
+                child = vrmlNode(self, NODE_ARRAY, i)
+                i = child.parse(i)
+
+            elif is_numline(i):
+                l_split = l.split(',')
+
+                values = None
+                # See if each item is a float?
+
+                for num_type in (int, float):
+                    try:
+                        values = [num_type(v) for v in l_split]
+                        break
+                    except:
+                        pass
+
+                    try:
+                        values = [[num_type(v) for v in segment.split()] for segment in l_split]
+                        break
+                    except:
+                        pass
+
+                if values == None:  # don't parse
+                    values = l_split
+
+                # This should not extend over multiple lines, however it is possible
+                # print(self.array_data)
+                if values:
+                    self.array_data.extend(values)
+                i += 1
+            else:
+                words = l.split()
+                if len(words) > 2 and words[1] == 'USE':
+                    vrmlNode(self, NODE_REFERENCE, i)
+                else:
+
+                    # print("FIELD", i, l)
+                    #
+                    #words = l.split()
+                    ### print('\t\ttag', i)
+                    # this is a tag/
+                    # print(words, i, l)
+                    value = l
+                    # print(i)
+                    # javascript (multi-line) strings can exist as values.
+                    quote_count = l.count('"')
+                    if quote_count % 2:  # odd number?
+                        # print('MULTILINE')
+                        while 1:
+                            i += 1
+                            l = lines[i]
+                            quote_count = l.count('"')
+                            if quote_count % 2:  # odd number?
+                                value += '\n' + l[:l.rfind('"')]
+                                break  # assume the string ends here
+                            else:
+                                value += '\n' + l
+
+                    value_all = value.split()
+
+                    def iskey(k):
+                        if k[0] != '"' and k[0].isalpha() and k.upper() not in ('TRUE', 'FALSE'):
+                            return True
+                        return False
+
+                    def split_fields(value):
+                        '''
+                        key 0.0 otherkey 1,2,3 opt1 opt1 0.0
+                            -> [key 0.0], [otherkey 1,2,3], [opt1 opt1 0.0]
+                        '''
+                        field_list = []
+                        field_context = []
+
+                        for j in range(len(value)):
+                            if iskey(value[j]):
+                                if field_context:
+                                    # this IS a key but the previous value was not a key, or it was a defined field.
+                                    if (not iskey(field_context[-1])) or ((len(field_context) == 3 and field_context[1] == 'IS')):
+                                        field_list.append(field_context)
+
+                                        field_context = [value[j]]
+                                    else:
+                                        # The last item was not a value, multiple keys are needed in some cases.
+                                        field_context.append(value[j])
+                                else:
+                                    # Is empty, just add this on
+                                    field_context.append(value[j])
+                            else:
+                                # Add a value to the list
+                                field_context.append(value[j])
+
+                        if field_context:
+                            field_list.append(field_context)
+
+                        return field_list
+
+                    for value in split_fields(value_all):
+                        # Split
+
+                        if value[0] == 'field':
+                            # field SFFloat creaseAngle 4
+                            self.proto_field_defs.append(value)
+                        else:
+                            self.fields.append(value)
+                i += 1
+
+
+def gzipOpen(path):
+    try:
+        import gzip
+    except:
+        gzip = None
+
+    data = None
+    if gzip:
+        try:
+            data = gzip.open(path, 'r').read()
+        except:
+            pass
+    else:
+        print('\tNote, gzip module could not be imported, compressed files will fail to load')
+
+    if data == None:
+        try:
+            data = open(path, 'rU').read()
+        except:
+            pass
+
+    return data
+
+
+def vrml_parse(path):
+    '''
+    Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
+    Return root (vrmlNode, '') or (None, 'Error String')
+    '''
+    data = gzipOpen(path)
+
+    if data == None:
+        return None, 'Failed to open file: ' + path
+
+    # Stripped above
+    lines[:] = vrmlFormat(data)
+
+    lines.insert(0, '{')
+    lines.insert(0, 'dummy_node')
+    lines.append('}')
+    # Use for testing our parsed output, so we can check on line numbers.
+
+    '''
+    ff = open('/tmp/test.txt', 'w')
+    ff.writelines([l+'\n' for l in lines])
+    ff.close()
+    '''
+
+    # Now evaluate it
+    node_type, new_i = is_nodeline(0, [])
+    if not node_type:
+        return None, 'Error: VRML file has no starting Node'
+
+    # Trick to make sure we get all root nodes.
+    lines.insert(0, '{')
+    lines.insert(0, 'root_node____')  # important the name starts with an ascii char
+    lines.append('}')
+
+    root = vrmlNode(None, NODE_NORMAL, -1)
+    root.setRoot(path)  # we need to set the root so we have a namespace and know the path in case of inlining
+
+    # Parse recursively
+    root.parse(0)
+
+    # This prints a load of text
+    if DEBUG:
+        print(root)
+
+    return root, ''
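+
+# Usage sketch:
+#   root, msg = vrml_parse('/path/to/scene.wrl')
+#   if root is None:
+#       print(msg)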
+
+
+# ====================== END VRML
+
+# ====================== X3d Support
+
+# Same as vrml but with the parser replaced
+class x3dNode(vrmlNode):
+    def __init__(self, parent, node_type, x3dNode):
+        vrmlNode.__init__(self, parent, node_type, -1)
+        self.x3dNode = x3dNode
+
+    def parse(self, IS_PROTO_DATA=False):
+        # print(self.x3dNode.tagName)
+
+        define = self.x3dNode.getAttributeNode('DEF')
+        if define:
+            self.getDefDict()[define.value] = self
+        else:
+            use = self.x3dNode.getAttributeNode('USE')
+            if use:
+                try:
+                    self.reference = self.getDefDict()[use.value]
+                    self.node_type = NODE_REFERENCE
+                except:
+                    print('\tWarning: reference', use.value, 'not found')
+                    self.parent.children.remove(self)
+
+                return
+
+        for x3dChildNode in self.x3dNode.childNodes:
+            if x3dChildNode.nodeType in (x3dChildNode.TEXT_NODE, x3dChildNode.COMMENT_NODE, x3dChildNode.CDATA_SECTION_NODE):
+                continue
+
+            node_type = NODE_NORMAL
+            # print(x3dChildNode, dir(x3dChildNode))
+            if x3dChildNode.getAttributeNode('USE'):
+                node_type = NODE_REFERENCE
+
+            child = x3dNode(self, node_type, x3dChildNode)
+            child.parse()
+
+        # TODO - x3d Inline
+
+    def getSpec(self):
+        return self.x3dNode.tagName  # should match vrml spec
+
+    def getDefName(self):
+        data = self.x3dNode.getAttributeNode('DEF')
+        if data:
+            return data.value
+        return None
+
+    # Other funcs operate from vrml, but this means we can wrap XML fields, still use nice utility funcs
+    # getFieldAsArray getFieldAsBool etc
+    def getFieldName(self, field, ancestry, AS_CHILD=False):
+        # ancestry and AS_CHILD are ignored, only used for VRML now
+
+        self_real = self.getRealNode()  # in case we're an instance
+        field_xml = self.x3dNode.getAttributeNode(field)
+        if field_xml:
+            value = field_xml.value
+
+            # We may want to edit this for x3d specific stuff
+            # Sucks a bit to return the field name in the list but vrml expects this :/
+            return value.split()
+        else:
+            return None
+
+
+def x3d_parse(path):
+    '''
+    Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
+    Return root (x3dNode, '') or (None, 'Error String')
+    '''
+
+    try:
+        import xml.dom.minidom
+    except:
+        return None, 'Error, importing the XML parsing module (xml.dom.minidom) failed, check your Python install'
+
+    '''
+    try:    doc = xml.dom.minidom.parse(path)
+    except: return None, 'Could not parse this X3D file, XML error'
+    '''
+
+    # Could add a try/except here, but a console error is more useful.
+    data = gzipOpen(path)
+
+    if data == None:
+        return None, 'Failed to open file: ' + path
+
+    doc = xml.dom.minidom.parseString(data)
+
+    try:
+        x3dnode = doc.getElementsByTagName('X3D')[0]
+    except:
+        return None, 'Not a valid x3d document, cannot import'
+
+    root = x3dNode(None, NODE_NORMAL, x3dnode)
+    root.setRoot(path)  # so images and Inline's we load have a relative path
+    root.parse()
+
+    return root, ''
+
+## f = open('/_Cylinder.wrl', 'r')
+# f = open('/fe/wrl/Vrml/EGS/TOUCHSN.WRL', 'r')
+# vrml_parse('/fe/wrl/Vrml/EGS/TOUCHSN.WRL')
+#vrml_parse('/fe/wrl/Vrml/EGS/SCRIPT.WRL')
+'''
+import os
+files = os.popen('find /fe/wrl -iname "*.wrl"').readlines()
+files.sort()
+tot = len(files)
+for i, f in enumerate(files):
+    #if i < 801:
+    #   continue
+
+    f = f.strip()
+    print(f, i, tot)
+    vrml_parse(f)
+'''
+
+# NO BLENDER CODE ABOVE THIS LINE.
+# -----------------------------------------------------------------------------------
+import bpy
+import image_utils
+# import BPyImage
+# import BPySys
+# reload(BPySys)
+# reload(BPyImage)
+# import Blender
+# from Blender import Texture, Material, Mathutils, Mesh, Types, Window
+from mathutils import Vector, Matrix
+
+RAD_TO_DEG = 57.29578
+
+GLOBALS = {'CIRCLE_DETAIL': 16}
+
+
+def translateRotation(rot):
+    ''' axis, angle '''
+    return Matrix.Rotation(rot[3], 4, Vector(rot[:3]))
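+
+# Example (illustrative): the VRML rotation field '0 1 0 1.5708'
+# (axis x y z, then angle in radians) maps to
+# Matrix.Rotation(1.5708, 4, Vector((0.0, 1.0, 0.0))).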
+
+
+def translateScale(sca):
+    mat = Matrix()  # 4x4 default
+    mat[0][0] = sca[0]
+    mat[1][1] = sca[1]
+    mat[2][2] = sca[2]
+    return mat
+
+
+def translateTransform(node, ancestry):
+    cent = node.getFieldAsFloatTuple('center', None, ancestry)  # (0.0, 0.0, 0.0)
+    rot = node.getFieldAsFloatTuple('rotation', None, ancestry)  # (0.0, 0.0, 1.0, 0.0)
+    sca = node.getFieldAsFloatTuple('scale', None, ancestry)  # (1.0, 1.0, 1.0)
+    scaori = node.getFieldAsFloatTuple('scaleOrientation', None, ancestry)  # (0.0, 0.0, 1.0, 0.0)
+    tx = node.getFieldAsFloatTuple('translation', None, ancestry)  # (0.0, 0.0, 0.0)
+
+    if cent:
+        cent_mat = Matrix.Translation(Vector(cent)).resize4x4()
+        cent_imat = cent_mat.copy().invert()
+    else:
+        cent_mat = cent_imat = None
+
+    if rot:
+        rot_mat = translateRotation(rot)
+    else:
+        rot_mat = None
+
+    if sca:
+        sca_mat = translateScale(sca)
+    else:
+        sca_mat = None
+
+    if scaori:
+        scaori_mat = translateRotation(scaori)
+        scaori_imat = scaori_mat.copy().invert()
+    else:
+        scaori_mat = scaori_imat = None
+
+    if tx:
+        tx_mat = Matrix.Translation(Vector(tx)).resize4x4()
+    else:
+        tx_mat = None
+
+    new_mat = Matrix()
+
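+    # Order per the VRML97 Transform spec: T x C x R x SR x S x -SR x -C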
+    mats = [tx_mat, cent_mat, rot_mat, scaori_mat, sca_mat, scaori_imat, cent_imat]
+    for mtx in mats:
+        if mtx:
+            new_mat = new_mat * mtx
+
+    return new_mat
+
+
+def translateTexTransform(node, ancestry):
+    cent = node.getFieldAsFloatTuple('center', None, ancestry)  # (0.0, 0.0)
+    rot = node.getFieldAsFloat('rotation', None, ancestry)  # 0.0
+    sca = node.getFieldAsFloatTuple('scale', None, ancestry)  # (1.0, 1.0)
+    tx = node.getFieldAsFloatTuple('translation', None, ancestry)  # (0.0, 0.0)
+
+    if cent:
+        # cent is at a corner by default
+        cent_mat = Matrix.Translation(Vector(cent).resize3D()).resize4x4()
+        cent_imat = cent_mat.copy().invert()
+    else:
+        cent_mat = cent_imat = None
+
+    if rot:
+        rot_mat = Matrix.Rotation(rot, 4, 'Z')  # translateRotation(rot)
+    else:
+        rot_mat = None
+
+    if sca:
+        sca_mat = translateScale((sca[0], sca[1], 0.0))
+    else:
+        sca_mat = None
+
+    if tx:
+        tx_mat = Matrix.Translation(Vector(tx).resize3D()).resize4x4()
+    else:
+        tx_mat = None
+
+    new_mat = Matrix()
+
+    # as specified in VRML97 docs
+    mats = [cent_imat, sca_mat, rot_mat, cent_mat, tx_mat]
+
+    for mtx in mats:
+        if mtx:
+            new_mat = new_mat * mtx
+
+    return new_mat
+
+
+# 90d X rotation
+import math
+MATRIX_Z_TO_Y = Matrix.Rotation(math.pi / 2.0, 4, 'X')
+
+
+def getFinalMatrix(node, mtx, ancestry):
+
+    transform_nodes = [node_tx for node_tx in ancestry if node_tx.getSpec() == 'Transform']
+    if node.getSpec() == 'Transform':
+        transform_nodes.append(node)
+    transform_nodes.reverse()
+
+    if mtx is None:
+        mtx = Matrix()
+
+    for node_tx in transform_nodes:
+        mat = translateTransform(node_tx, ancestry)
+        mtx = mat * mtx
+
+    # worldspace matrix
+    mtx = MATRIX_Z_TO_Y * mtx
+
+    return mtx
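+
+# MATRIX_Z_TO_Y (the 90 degree X rotation above) is applied last, converting
+# the scene from VRML's Y-up convention to Blender's Z-up.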
+
+
+def importMesh_IndexedFaceSet(geom, bpyima, ancestry):
+    # print(geom.lineno, geom.id, vrmlNode.DEF_NAMESPACE.keys())
+
+    ccw = geom.getFieldAsBool('ccw', True, ancestry)
+    ifs_colorPerVertex = geom.getFieldAsBool('colorPerVertex', True, ancestry)  # per vertex or per face
+    ifs_normalPerVertex = geom.getFieldAsBool('normalPerVertex', True, ancestry)
+
+    # It is odd that point is inside Coordinate
+
+    # VRML not x3d
+    #coord = geom.getChildByName('coord') # 'Coordinate'
+
+    coord = geom.getChildBySpec('Coordinate')  # works for x3d and vrml
+
+    if coord:
+        ifs_points = coord.getFieldAsArray('point', 3, ancestry)
+    else:
+        coord = []
+
+    if not coord:
+        print('\tWarning: IndexedFaceSet has no points')
+        return None, ccw
+
+    ifs_faces = geom.getFieldAsArray('coordIndex', 0, ancestry)
+
+    coords_tex = None
+    if ifs_faces:  # In rare cases this causes problems - no faces but UVs???
+
+        # WORKS - VRML ONLY
+        # coords_tex = geom.getChildByName('texCoord')
+        coords_tex = geom.getChildBySpec('TextureCoordinate')
+
+        if coords_tex:
+            ifs_texpoints = coords_tex.getFieldAsArray('point', 2, ancestry)
+            ifs_texfaces = geom.getFieldAsArray('texCoordIndex', 0, ancestry)
+
+            if not ifs_texpoints:
+                # If we have no coords, then don't bother
+                coords_tex = None
+
+    # WORKS - VRML ONLY
+    # vcolor = geom.getChildByName('color')
+    vcolor = geom.getChildBySpec('Color')
+    vcolor_spot = None  # spot color when we don't have an array of colors
+    if vcolor:
+        # float to char
+        ifs_vcol = [(0, 0, 0)]  # EEKADOODLE - vertex start at 1
+        ifs_vcol.extend([col for col in vcolor.getFieldAsArray('color', 3, ancestry)])
+        ifs_color_index = geom.getFieldAsArray('colorIndex', 0, ancestry)
+
+        if len(ifs_vcol) == 1:  # only the dummy entry: no color array was found
+            vcolor_spot = vcolor.getFieldAsFloatTuple('color', [], ancestry)
+
+    # Convert faces into something blender can use
+    edges = []
+
+    # All lists are aligned!
+    faces = []
+    faces_uv = []  # if ifs_texfaces is empty then the faces_uv will match faces exactly.
+    faces_orig_index = []  # for ngons, we need to know our original index
+
+    if coords_tex and ifs_texfaces:
+        do_uvmap = True
+    else:
+        do_uvmap = False
+
+    # current_face = [0] # pointer anyone
+
+    def add_face(face, fuvs, orig_index):
+        l = len(face)
+        if l == 3 or l == 4:
+            faces.append(face)
+            # faces_orig_index.append(current_face[0])
+            if do_uvmap:
+                faces_uv.append(fuvs)
+
+            faces_orig_index.append(orig_index)
+        elif l == 2:
+            edges.append(face)
+        elif l > 4:
+            for i in range(2, len(face)):
+                faces.append([face[0], face[i - 1], face[i]])
+                if do_uvmap:
+                    faces_uv.append([fuvs[0], fuvs[i - 1], fuvs[i]])
+                faces_orig_index.append(orig_index)
+        else:
+            # faces with 1 verts? pfft!
+            # still will affect index ordering
+            pass
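+
+    # Fan triangulation sketch: an ngon [a, b, c, d, e] is emitted above as
+    # triangles [a, b, c], [a, c, d], [a, d, e]; UV indices are split the
+    # same way.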
+
+    face = []
+    fuvs = []
+    orig_index = 0
+    for i, fi in enumerate(ifs_faces):
+        # ifs_texfaces and ifs_faces should be aligned
+        if fi != -1:
+            # face.append(int(fi)) # in rare cases this is a float
+            # EEKADOODLE!!!
+            # Annoyance where faces that have a zero index vert get rotated. This will then mess up UVs and VColors
+            face.append(int(fi) + 1)  # in rare cases this is a float, +1 because of stupid EEKADOODLE :/
+
+            if do_uvmap:
+                if i >= len(ifs_texfaces):
+                    print('\tWarning: UV Texface index out of range')
+                    fuvs.append(ifs_texfaces[0])
+                else:
+                    fuvs.append(ifs_texfaces[i])
+        else:
+            add_face(face, fuvs, orig_index)
+            face = []
+            if do_uvmap:
+                fuvs = []
+            orig_index += 1
+
+    add_face(face, fuvs, orig_index)
+    del add_face  # dont need this func anymore
+
+    bpymesh = bpy.data.meshes.new(name="XXX")
+
+    # EEKADOODLE
+    bpymesh.vertices.add(1 + (len(ifs_points)))
+    bpymesh.vertices.foreach_set("co", [0, 0, 0] + [a for v in ifs_points for a in v])  # XXX25 speed
+
+    # print(len(ifs_points), faces, edges, ngons)
+
+    try:
+        bpymesh.faces.add(len(faces))
+        bpymesh.faces.foreach_set("vertices_raw", [a for f in faces for a in (f + [0] if len(f) == 3 else f)])  # XXX25 speed
+    except KeyError:
+        print("one or more vert indicies out of range. corrupt file?")
+        #for f in faces:
+        #   bpymesh.faces.extend(faces, smooth=True)
+
+    # bpymesh.calcNormals()
+    bpymesh.update()
+
+    if len(bpymesh.faces) != len(faces):
+        print('\tWarning: adding faces did not work! file is invalid, not adding UVs or vcolors')
+        return bpymesh, ccw
+
+    # Apply UVs if we have them
+    if not do_uvmap:
+        faces_uv = faces  # we didn't need a uvmap in the first place; fall back to the face/vert mapping.
+    if coords_tex:
+        #print(ifs_texpoints)
+        # print(geom)
+        uvlay = bpymesh.uv_textures.new()
+
+        for i, f in enumerate(uvlay.data):
+            f.image = bpyima
+            fuv = faces_uv[i]  # uv indices
+            for j, uv in enumerate(f.uv):
+                # print(fuv, j, len(ifs_texpoints))
+                try:
+                    f.uv[j] = ifs_texpoints[fuv[j]]  # XXX25, speedup
+                except:
+                    print('\tWarning: UV Index out of range')
+                    f.uv[j] = ifs_texpoints[0]  # XXX25, speedup
+
+    elif bpyima and len(bpymesh.faces):
+        # Oh bugger! - we can't really use Blender's ORCO for texture space since texspace doesn't rotate.
+        # we have to create VRML's coords as UVs instead.
+
+        # VRML docs
+        '''
+        If the texCoord field is NULL, a default texture coordinate mapping is calculated using the local
+        coordinate system bounding box of the shape. The longest dimension of the bounding box defines the S coordinates,
+        and the next longest defines the T coordinates. If two or all three dimensions of the bounding box are equal,
+        ties shall be broken by choosing the X, Y, or Z dimension in that order of preference.
+        The value of the S coordinate ranges from 0 to 1, from one end of the bounding box to the other.
+        The T coordinate ranges between 0 and the ratio of the second greatest dimension of the bounding box to the greatest dimension.
+        '''
+
+        # Note, S,T == U,V
+        # U gets longest, V gets second longest
+        xmin, ymin, zmin = ifs_points[0]
+        xmax, ymax, zmax = ifs_points[0]
+        for co in ifs_points:
+            x, y, z = co
+            if x < xmin:
+                xmin = x
+            if y < ymin:
+                ymin = y
+            if z < zmin:
+                zmin = z
+
+            if x > xmax:
+                xmax = x
+            if y > ymax:
+                ymax = y
+            if z > zmax:
+                zmax = z
+
+        xlen = xmax - xmin
+        ylen = ymax - ymin
+        zlen = zmax - zmin
+
+        depth_min = xmin, ymin, zmin
+        depth_list = [xlen, ylen, zlen]
+        depth_sort = depth_list[:]
+        depth_sort.sort()
+
+        depth_idx = [depth_list.index(val) for val in depth_sort]
+
+        axis_u = depth_idx[-1]
+        axis_v = depth_idx[-2]  # second longest
+
+        # Hack, swap these !!! TODO - Why swap??? - it seems to work correctly but should not.
+        # axis_u,axis_v = axis_v,axis_u
+
+        min_u = depth_min[axis_u]
+        min_v = depth_min[axis_v]
+        depth_u = depth_list[axis_u]
+        depth_v = depth_list[axis_v]
+
+
+        if axis_u == axis_v:
+            # This should be safe because when 2 axes have the same length, the lower index will be used.
+            axis_v += 1
+
+        uvlay = bpymesh.uv_textures.new()
+
+        # HACK !!! - seems to be compatible with Cosmo though.
+        depth_v = depth_u = max(depth_v, depth_u)
+
+        bpymesh_vertices = bpymesh.vertices[:]
+        bpymesh_faces = bpymesh.faces[:]
+
+        for j, f in enumerate(uvlay.data):
+            f.image = bpyima
+            fuv = f.uv
+            f_v = bpymesh_faces[j].vertices[:]  # XXX25 speed
+
+            for i, v in enumerate(f_v):
+                co = bpymesh_vertices[v].co
+                fuv[i] = (co[axis_u] - min_u) / depth_u, (co[axis_v] - min_v) / depth_v
+
+    # Add vertex colors
+    if vcolor:
+        # print(ifs_vcol)
+        collay = bpymesh.vertex_colors.new()
+
+        for f_idx, f in enumerate(collay.data):
+            fv = bpymesh.faces[f_idx].vertices[:]
+            if len(fv) == 3:  # XXX speed
+                fcol = f.color1, f.color2, f.color3
+            else:
+                fcol = f.color1, f.color2, f.color3, f.color4
+            if ifs_colorPerVertex:
+                for i, c in enumerate(fcol):
+                    color_index = fv[i]  # color index is vert index
+                    if ifs_color_index:
+                        try:
+                            color_index = ifs_color_index[color_index]
+                        except:
+                            print('\tWarning: per vertex color index out of range')
+                            continue
+
+                    if color_index < len(ifs_vcol):
+                        c.r, c.g, c.b = ifs_vcol[color_index]
+                    else:
+                        #print('\tWarning: per face color index out of range')
+                        pass
+            else:
+                if vcolor_spot:  # use 1 color, when ifs_vcol is []
+                    for c in fcol:
+                        c.r, c.g, c.b = vcolor_spot
+                else:
+                    color_index = faces_orig_index[f_idx]  # color index is face index
+                    #print(color_index, ifs_color_index)
+                    if ifs_color_index:
+                        if color_index >= len(ifs_color_index):
+                            print('\tWarning: per face color index out of range')
+                            color_index = 0
+                        else:
+                            color_index = ifs_color_index[color_index]
+                    try:
+                        col = ifs_vcol[color_index]
+                    except IndexError:
+                        # TODO, look
+                        col = (1.0, 1.0, 1.0)
+                    for i, c in enumerate(fcol):
+                        c.r, c.g, c.b = col
+
+    # XXX25
+    # bpymesh.vertices.delete([0, ])  # EEKADOODLE
+
+    return bpymesh, ccw
+
+
+def importMesh_IndexedLineSet(geom, ancestry):
+    # VRML not x3d
+    #coord = geom.getChildByName('coord') # 'Coordinate'
+    coord = geom.getChildBySpec('Coordinate')  # works for x3d and vrml
+    if coord:
+        points = coord.getFieldAsArray('point', 3, ancestry)
+    else:
+        points = []
+
+    if not points:
+        print('\tWarning: IndexedLineSet had no points')
+        return None
+
+    ils_lines = geom.getFieldAsArray('coordIndex', 0, ancestry)
+
+    lines = []
+    line = []
+
+    for il in ils_lines:
+        if il == -1:
+            lines.append(line)
+            line = []
+        else:
+            line.append(int(il))
+    lines.append(line)
+
+    # vcolor = geom.getChildByName('color') # blender doesn't have per vertex color
+
+    bpycurve = bpy.data.curves.new('IndexedCurve', 'CURVE')
+    bpycurve.dimensions = '3D'
+
+    for line in lines:
+        if not line:
+            continue
+        nu = bpycurve.splines.new('POLY')
+        nu.points.add(len(line) - 1)  # splines.new() already starts with one point
+
+        for il, pt in zip(line, nu.points):
+            pt.co[0:3] = points[il]
+
+    return bpycurve
+
+
+def importMesh_PointSet(geom, ancestry):
+    # VRML not x3d
+    #coord = geom.getChildByName('coord') # 'Coordinate'
+    coord = geom.getChildBySpec('Coordinate')  # works for x3d and vrml
+    if coord:
+        points = coord.getFieldAsArray('point', 3, ancestry)
+    else:
+        points = []
+
+    # vcolor = geom.getChildByName('color') # blender doesn't have per vertex color
+
+    bpymesh = bpy.data.meshes.new("XXX")
+    bpymesh.vertices.add(len(points))
+    bpymesh.vertices.foreach_set("co", [a for v in points for a in v])
+
+    # bpymesh.calcNormals()  # will just be dummy normals
+    bpymesh.update()
+    return bpymesh
+
+GLOBALS['CIRCLE_DETAIL'] = 12
+
+
+def bpy_ops_add_object_hack():  # XXX25, evil
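+    '''Fetch the datablock of the object just added by a primitive operator
+    (assumed to be scene.objects[0]), then unlink and delete the temporary object.
+    '''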
+    scene = bpy.context.scene
+    obj = scene.objects[0]
+    scene.objects.unlink(obj)
+    bpymesh = obj.data
+    bpy.data.objects.remove(obj)
+    return bpymesh
+
+
+def importMesh_Sphere(geom, ancestry):
+    diameter = geom.getFieldAsFloat('radius', 0.5, ancestry)
+    # bpymesh = Mesh.Primitives.UVsphere(GLOBALS['CIRCLE_DETAIL'], GLOBALS['CIRCLE_DETAIL'], diameter)
+
+    bpy.ops.mesh.primitive_uv_sphere_add(segments=GLOBALS['CIRCLE_DETAIL'],
+                                         ring_count=GLOBALS['CIRCLE_DETAIL'],
+                                         size=diameter,
+                                         view_align=False,
+                                         enter_editmode=False,
+                                         )
+
+    bpymesh = bpy_ops_add_object_hack()
+
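+    # Blender's primitive is Z-up; VRML spheres/cylinders/cones are Y-up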
+    bpymesh.transform(MATRIX_Z_TO_Y)
+    return bpymesh
+
+
+def importMesh_Cylinder(geom, ancestry):
+    # bpymesh = bpy.data.meshes.new()
+    diameter = geom.getFieldAsFloat('radius', 1.0, ancestry)
+    height = geom.getFieldAsFloat('height', 2, ancestry)
+
+    # bpymesh = Mesh.Primitives.Cylinder(GLOBALS['CIRCLE_DETAIL'], diameter, height)
+
+    bpy.ops.mesh.primitive_cylinder_add(vertices=GLOBALS['CIRCLE_DETAIL'],
+                                        radius=diameter,
+                                        depth=height,
+                                        cap_ends=True,
+                                        view_align=False,
+                                        enter_editmode=False,
+                                        )
+
+    bpymesh = bpy_ops_add_object_hack()
+
+    bpymesh.transform(MATRIX_Z_TO_Y)
+
+    # Warning: relies on the order Blender adds verts,
+    # not a nice design but it won't change soon.
+
+    bottom = geom.getFieldAsBool('bottom', True, ancestry)
+    side = geom.getFieldAsBool('side', True, ancestry)
+    top = geom.getFieldAsBool('top', True, ancestry)
+
+    if not top:  # last vert is top center of tri fan.
+        # bpymesh.vertices.delete([(GLOBALS['CIRCLE_DETAIL'] + GLOBALS['CIRCLE_DETAIL']) + 1])  # XXX25
+        pass
+
+    if not bottom:  # second last vert is bottom of triangle fan
+        # XXX25
+        # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL'] + GLOBALS['CIRCLE_DETAIL']])
+        pass
+
+    if not side:
+        # remove all quads
+        # XXX25
+        # bpymesh.faces.delete(1, [f for f in bpymesh.faces if len(f) == 4])
+        pass
+
+    return bpymesh
+
+
+def importMesh_Cone(geom, ancestry):
+    # bpymesh = bpy.data.meshes.new()
+    diameter = geom.getFieldAsFloat('bottomRadius', 1.0, ancestry)
+    height = geom.getFieldAsFloat('height', 2, ancestry)
+
+    # bpymesh = Mesh.Primitives.Cone(GLOBALS['CIRCLE_DETAIL'], diameter, height)
+
+    bpy.ops.mesh.primitive_cone_add(vertices=GLOBALS['CIRCLE_DETAIL'],
+                                    radius=diameter,
+                                    depth=height,
+                                    cap_end=True,
+                                    view_align=False,
+                                    enter_editmode=False,
+                                    )
+
+    bpymesh = bpy_ops_add_object_hack()
+
+    bpymesh.transform(MATRIX_Z_TO_Y)
+
+    # Warning: relies on the order Blender adds verts,
+    # not a nice design but it won't change soon.
+
+    bottom = geom.getFieldAsBool('bottom', True, ancestry)
+    side = geom.getFieldAsBool('side', True, ancestry)
+
+    if not bottom:  # last vert is on the bottom
+        # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL'] + 1]) # XXX25
+        pass
+    if not side:  # second last vert is on the pointy bit of the cone
+        # bpymesh.vertices.delete([GLOBALS['CIRCLE_DETAIL']]) # XXX25
+        pass
+
+    return bpymesh
+
+
+def importMesh_Box(geom, ancestry):
+    # bpymesh = bpy.data.meshes.new()
+
+    size = geom.getFieldAsFloatTuple('size', (2.0, 2.0, 2.0), ancestry)
+
+    # bpymesh = Mesh.Primitives.Cube(1.0)
+    bpy.ops.mesh.primitive_cube_add(view_align=False,
+                                    enter_editmode=False,
+                                    )
+
+    bpymesh = bpy_ops_add_object_hack()
+
+    # Scale the 2x2x2 default cube down to the requested size (hence the 0.5 factor)
+    scale_mat = Matrix(((size[0], 0, 0), (0, size[1], 0), (0, 0, size[2]))) * 0.5
+    bpymesh.transform(scale_mat.resize4x4())
+
+    return bpymesh
+
+
+def importShape(node, ancestry):
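+    '''Import a Shape node: build a Blender material/texture from its
+    Appearance and a datablock from its geometry node, then link a new
+    object into the scene.
+    '''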
+    vrmlname = node.getDefName()
+    if not vrmlname:
+        vrmlname = 'Shape'
+
+    # works 100% in vrml, but not x3d
+    #appr = node.getChildByName('appearance') # , 'Appearance'
+    #geom = node.getChildByName('geometry') # , 'IndexedFaceSet'
+
+    # Works in vrml and x3d
+    appr = node.getChildBySpec('Appearance')
+    geom = node.getChildBySpec(['IndexedFaceSet', 'IndexedLineSet', 'PointSet', 'Sphere', 'Box', 'Cylinder', 'Cone'])
+
+    # Import any of the geometry types matched above
+    if geom:
+        bpymat = None
+        bpyima = None
+        texmtx = None
+
+        depth = 0  # so we can set alpha face flag later
+
+        if appr:
+
+            #mat = appr.getChildByName('material') # 'Material'
+            #ima = appr.getChildByName('texture') # , 'ImageTexture'
+            #if ima and ima.getSpec() != 'ImageTexture':
+            #   print('\tWarning: texture type "%s" is not supported' % ima.getSpec())
+            #   ima = None
+            # textx = appr.getChildByName('textureTransform')
+
+            mat = appr.getChildBySpec('Material')
+            ima = appr.getChildBySpec('ImageTexture')
+
+            textx = appr.getChildBySpec('TextureTransform')
+
+            if textx:
+                texmtx = translateTexTransform(textx, ancestry)
+
+            # print(mat, ima)
+            if mat or ima:
+
+                if not mat:
+                    mat = ima  # This is a bit dumb, but just means we use default values for all
+
+                # all values between 0.0 and 1.0, defaults from VRML docs
+                bpymat = bpy.data.materials.new("XXX")
+                bpymat.ambient = mat.getFieldAsFloat('ambientIntensity', 0.2, ancestry)
+                bpymat.diffuse_color = mat.getFieldAsFloatTuple('diffuseColor', [0.8, 0.8, 0.8], ancestry)
+
+                # NOTE: Blender doesn't support emissive color.
+                # Store it in the mirror color and approximate with emit.
+                emit = mat.getFieldAsFloatTuple('emissiveColor', [0.0, 0.0, 0.0], ancestry)
+                bpymat.mirror_color = emit
+                bpymat.emit = (emit[0] + emit[1] + emit[2]) / 3.0
+
+                bpymat.specular_hardness = int(1 + (510 * mat.getFieldAsFloat('shininess', 0.2, ancestry)))  # 0-1 -> 1-511
+                bpymat.specular_color = mat.getFieldAsFloatTuple('specularColor', [0.0, 0.0, 0.0], ancestry)
+                bpymat.alpha = 1.0 - mat.getFieldAsFloat('transparency', 0.0, ancestry)
+                if bpymat.alpha < 0.999:
+                    bpymat.use_transparency = True
+
+            if ima:
+                ima_url = ima.getFieldAsString('url', None, ancestry)
+
+                if ima_url is None:
+                    try:
+                        ima_url = ima.getFieldAsStringArray('url', ancestry)[0]  # in some cases we get a list of images.
+                    except (IndexError, TypeError):
+                        ima_url = None
+
+                if ima_url is None:
+                    print("\tWarning: image with no URL, this is odd")
+                else:
+                    bpyima = image_utils.image_load(ima_url, dirName(node.getFilename()), place_holder=False, recursive=False, convert_callback=imageConvertCompat)
+                    if bpyima:
+                        texture = bpy.data.textures.new("XXX", 'IMAGE')
+                        texture.image = bpyima
+
+                        # Adds textures for materials (rendering)
+                        try:
+                            depth = bpyima.depth
+                        except AttributeError:
+                            depth = -1
+
+                        if depth == 32:
+                            # Image has alpha, map to both color and alpha
+                            # (converted from the 2.4x setTexture/ZTRANSP API)
+                            mtex = bpymat.texture_slots.add()
+                            mtex.texture = texture
+                            mtex.texture_coords = 'UV'
+                            mtex.use_map_diffuse = True
+                            mtex.use_map_alpha = True
+                            texture.use_mipmap = True
+                            texture.use_interpolation = True
+                            texture.use_alpha = True
+                            bpymat.use_transparency = True
+                            bpymat.alpha = 0.0
+                        else:
+                            mtex = bpymat.texture_slots.add()
+                            mtex.texture = texture
+                            mtex.texture_coords = 'UV'
+                            mtex.use_map_diffuse = True
+
+                        ima_repS = ima.getFieldAsBool('repeatS', True, ancestry)
+                        ima_repT = ima.getFieldAsBool('repeatT', True, ancestry)
+
+                        # To make this work properly we'd need to scale the UVs too, better to ignore this for now.
+                        # texture.repeat =  max(1, ima_repS * 512), max(1, ima_repT * 512)
+
+                        if not ima_repS:
+                            bpyima.use_clamp_x = True
+                        if not ima_repT:
+                            bpyima.use_clamp_y = True
+
+        bpydata = None
+        geom_spec = geom.getSpec()
+        ccw = True
+        if geom_spec == 'IndexedFaceSet':
+            bpydata, ccw = importMesh_IndexedFaceSet(geom, bpyima, ancestry)
+        elif geom_spec == 'IndexedLineSet':
+            bpydata = importMesh_IndexedLineSet(geom, ancestry)
+        elif geom_spec == 'PointSet':
+            bpydata = importMesh_PointSet(geom, ancestry)
+        elif geom_spec == 'Sphere':
+            bpydata = importMesh_Sphere(geom, ancestry)
+        elif geom_spec == 'Box':
+            bpydata = importMesh_Box(geom, ancestry)
+        elif geom_spec == 'Cylinder':
+            bpydata = importMesh_Cylinder(geom, ancestry)
+        elif geom_spec == 'Cone':
+            bpydata = importMesh_Cone(geom, ancestry)
+        else:
+            print('\tWarning: unsupported type "%s"' % geom_spec)
+            return
+
+        if bpydata:
+            vrmlname = vrmlname + geom_spec
+
+            bpydata.name = vrmlname
+
+            bpyob = node.blendObject = bpy.data.objects.new(vrmlname, bpydata)
+            bpy.context.scene.objects.link(bpyob)
+
+            if isinstance(bpydata, bpy.types.Mesh):
+                is_solid = geom.getFieldAsBool('solid', True, ancestry)
+                creaseAngle = geom.getFieldAsFloat('creaseAngle', None, ancestry)
+
+                if creaseAngle is not None:
+                    # clamp to Blender's auto-smooth range of 1-80 degrees
+                    bpydata.auto_smooth_angle = 1 + int(min(79, creaseAngle * RAD_TO_DEG))
+                    bpydata.use_auto_smooth = True
+
+                # Only ever 1 material per shape
+                if bpymat:
+                    bpydata.materials.append(bpymat)
+
+                if bpydata.uv_textures:
+
+                    if depth == 32:  # set the faces alpha flag?
+                        # note: dead 2.4x 'Mesh.FaceTranspModes' lookup removed, it raised NameError
+                        for f in bpydata.uv_textures.active.data:
+                            f.blend_type = 'ALPHA'
+
+                    if texmtx:
+                        # Apply texture transform?
+                        uv_copy = Vector()
+                        for f in bpydata.uv_textures.active.data:
+                            fuv = f.uv
+                            for i, uv in enumerate(fuv):
+                                uv_copy.x = uv[0]
+                                uv_copy.y = uv[1]
+
+                                fuv[i] = (uv_copy * texmtx)[0:2]
+                # Done transforming the texture
+
+                # Must be here and not in IndexedFaceSet because it needs an object for the flip func. Messy :/
+                if not ccw:
+                    # bpydata.flipNormals()
+                    # XXX25
+                    pass
+
+            # else could be a curve for example
+
+            # Can transform data or object, better the object so we can instance the data
+            #bpymesh.transform(getFinalMatrix(node))
+            bpyob.matrix_world = getFinalMatrix(node, None, ancestry)
+
+
+def importLamp_PointLight(node, ancestry):
+    vrmlname = node.getDefName()
+    if not vrmlname:
+        vrmlname = 'PointLight'
+
+    # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO
+    # attenuation = node.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO
+    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
+    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
+    location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
+    # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
+    radius = node.getFieldAsFloat('radius', 100.0, ancestry)
+
+    bpylamp = bpy.data.lamps.new(vrmlname, 'POINT')
+    bpylamp.energy = intensity
+    bpylamp.distance = radius
+    bpylamp.color = color
+
+    mtx = Matrix.Translation(Vector(location))
+
+    return bpylamp, mtx
+
+
+def importLamp_DirectionalLight(node, ancestry):
+    vrmlname = node.getDefName()
+    if not vrmlname:
+        vrmlname = 'DirectLight'
+
+    # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0) # TODO
+    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
+    direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
+    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
+    # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
+
+    bpylamp = bpy.data.lamps.new(vrmlname, 'SUN')
+    bpylamp.energy = intensity
+    bpylamp.color = color
+
+    # lamps have their direction as -Z, +Y up
+    mtx = Vector(direction).to_track_quat('-Z', 'Y').to_matrix().resize4x4()
+
+    return bpylamp, mtx
+
+# It looks like the default values for beamWidth and cutOffAngle were swapped in the VRML docs.
+
+
+def importLamp_SpotLight(node, ancestry):
+    vrmlname = node.getDefName()
+    if not vrmlname:
+        vrmlname = 'SpotLight'
+
+    # ambientIntensity = geom.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO
+    # attenuation = geom.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO
+    beamWidth = node.getFieldAsFloat('beamWidth', 1.570796, ancestry)  # half-angle of the fully lit inner cone
+    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
+    cutOffAngle = node.getFieldAsFloat('cutOffAngle', 0.785398, ancestry) * 2.0  # VRML gives a half-angle, Blender's spot_size is the full aperture
+    direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
+    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
+    location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
+    # is_on = node.getFieldAsBool('on', True, ancestry) # TODO
+    radius = node.getFieldAsFloat('radius', 100.0, ancestry)
+
+    bpylamp = bpy.data.lamps.new(vrmlname, 'SPOT')
+    bpylamp.energy = intensity
+    bpylamp.distance = radius
+    bpylamp.color = color
+    bpylamp.spot_size = cutOffAngle
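+    # map VRML's inner beam angle onto Blender's spot_blend ratio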
+    if beamWidth > cutOffAngle:
+        bpylamp.spot_blend = 0.0
+    else:
+        if cutOffAngle == 0.0:  # this should never happen!
+            bpylamp.spot_blend = 0.5
+        else:
+            bpylamp.spot_blend = beamWidth / cutOffAngle
+
+    # lamps have their direction as -Z, +Y up
+    mtx = Matrix.Translation(Vector(location)) * Vector(direction).to_track_quat('-Z', 'Y').to_matrix().resize4x4()
+
+    return bpylamp, mtx
+
+
+def importLamp(node, spec, ancestry):
+    if spec == 'PointLight':
+        bpylamp, mtx = importLamp_PointLight(node, ancestry)
+    elif spec == 'DirectionalLight':
+        bpylamp, mtx = importLamp_DirectionalLight(node, ancestry)
+    elif spec == 'SpotLight':
+        bpylamp, mtx = importLamp_SpotLight(node, ancestry)
+    else:
+        raise ValueError("Error, not a lamp: %r" % spec)
+
+    bpyob = node.blendObject = bpy.data.objects.new("TODO", bpylamp)
+    bpy.context.scene.objects.link(bpyob)
+
+    bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry)
+
+
+def importViewpoint(node, ancestry):
+    name = node.getDefName()
+    if not name:
+        name = 'Viewpoint'
+
+    fieldOfView = node.getFieldAsFloat('fieldOfView', 0.785398, ancestry)  # angle in radians
+    # jump = node.getFieldAsBool('jump', True, ancestry)
+    orientation = node.getFieldAsFloatTuple('orientation', (0.0, 0.0, 1.0, 0.0), ancestry)
+    position = node.getFieldAsFloatTuple('position', (0.0, 0.0, 0.0), ancestry)
+    description = node.getFieldAsString('description', '', ancestry)
+
+    bpycam = bpy.data.cameras.new(name)
+
+    bpycam.angle = fieldOfView
+
+    mtx = Matrix.Translation(Vector(position)) * translateRotation(orientation)
+
+    bpyob = node.blendObject = bpy.data.objects.new("TODO", bpycam)
+    bpy.context.scene.objects.link(bpyob)
+    bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry)
+
+
+def importTransform(node, ancestry):
+    name = node.getDefName()
+    if not name:
+        name = 'Transform'
+
+    bpyob = node.blendObject = bpy.data.objects.new(name, None)
+    bpy.context.scene.objects.link(bpyob)
+
+    bpyob.matrix_world = getFinalMatrix(node, None, ancestry)
+
+    # so they are not too annoying
+    bpyob.empty_draw_type = 'PLAIN_AXES'
+    bpyob.empty_draw_size = 0.2
+
+
+#def importTimeSensor(node):
+def action_fcurve_ensure(action, data_path, array_index):
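+    '''Return the F-Curve matching data_path/array_index on the action, adding it if missing.'''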
+    for fcu in action.fcurves:
+        if fcu.data_path == data_path and fcu.array_index == array_index:
+            return fcu
+
+    return action.fcurves.new(data_path=data_path, array_index=array_index)
+
+
+def translatePositionInterpolator(node, action, ancestry):
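+    '''Convert a VRML PositionInterpolator (key/keyValue pairs)
+    into linear location F-Curves on the action.
+    '''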
+    key = node.getFieldAsArray('key', 0, ancestry)
+    keyValue = node.getFieldAsArray('keyValue', 3, ancestry)
+
+    loc_x = action_fcurve_ensure(action, "location", 0)
+    loc_y = action_fcurve_ensure(action, "location", 1)
+    loc_z = action_fcurve_ensure(action, "location", 2)
+
+    for i, time in enumerate(key):
+        try:
+            x, y, z = keyValue[i]
+        except (ValueError, IndexError):
+            continue
+
+        loc_x.keyframe_points.add(time, x)
+        loc_y.keyframe_points.add(time, y)
+        loc_z.keyframe_points.add(time, z)
+
+    for fcu in (loc_x, loc_y, loc_z):
+        for kf in fcu.keyframe_points:
+            kf.interpolation = 'LINEAR'
+
+
+def translateOrientationInterpolator(node, action, ancestry):
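+    '''Convert a VRML OrientationInterpolator (axis-angle keyValues)
+    into linear rotation_euler F-Curves on the action.
+    '''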
+    key = node.getFieldAsArray('key', 0, ancestry)
+    keyValue = node.getFieldAsArray('keyValue', 4, ancestry)
+
+    rot_x = action_fcurve_ensure(action, "rotation_euler", 0)
+    rot_y = action_fcurve_ensure(action, "rotation_euler", 1)
+    rot_z = action_fcurve_ensure(action, "rotation_euler", 2)
+
+    for i, time in enumerate(key):
+        try:
+            x, y, z, w = keyValue[i]
+        except (ValueError, IndexError):
+            continue
+
+        mtx = translateRotation((x, y, z, w))
+        eul = mtx.to_euler()
+        rot_x.keyframe_points.add(time, eul.x)
+        rot_y.keyframe_points.add(time, eul.y)
+        rot_z.keyframe_points.add(time, eul.z)
+
+    for fcu in (rot_x, rot_y, rot_z):
+        for kf in fcu.keyframe_points:
+            kf.interpolation = 'LINEAR'
+
+
+# Untested!
+def translateScalarInterpolator(node, action, ancestry):
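+    '''Convert a scale interpolator into scale F-Curves on the action.
+    Untested: assumes 3 floats per keyValue and skips the LINEAR interpolation pass.
+    '''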
+    key = node.getFieldAsArray('key', 0, ancestry)
+    keyValue = node.getFieldAsArray('keyValue', 3, ancestry)  # 3 floats per scale key (was 4, a likely copy-paste slip)
+
+    sca_x = action_fcurve_ensure(action, "scale", 0)
+    sca_y = action_fcurve_ensure(action, "scale", 1)
+    sca_z = action_fcurve_ensure(action, "scale", 2)
+
+    for i, time in enumerate(key):
+        try:
+            x, y, z = keyValue[i]
+        except (ValueError, IndexError):
+            continue
+
+        sca_x.keyframe_points.add(time, x)  # use .add() to match the other interpolators
+        sca_y.keyframe_points.add(time, y)
+        sca_z.keyframe_points.add(time, z)
+
+
+def translateTimeSensor(node, action, ancestry):
+    '''
+    Apply a time sensor to an action. VRML has many combinations of loop/start/stop/cycle times
+    that give different results; for now just do the basics.
+    '''
+
+    # XXX25 TODO: everything below still uses the 2.4x Ipo API, disabled for now
+    if 1:
+        return
+
+    time_cu = action.addCurve('Time')
+    time_cu.interpolation = Blender.IpoCurve.InterpTypes.LINEAR
+
+    cycleInterval = node.getFieldAsFloat('cycleInterval', None, ancestry)
+
+    startTime = node.getFieldAsFloat('startTime', 0.0, ancestry)
+    stopTime = node.getFieldAsFloat('stopTime', 250.0, ancestry)
+
+    if cycleInterval is not None:
+        stopTime = startTime + cycleInterval
+
+    loop = node.getFieldAsBool('loop', False, ancestry)
+
+    time_cu.append((1 + startTime, 0.0))
+    time_cu.append((1 + stopTime, 1.0 / 10.0))  # annoying, the UI uses /10
+
+    if loop:
+        time_cu.extend = Blender.IpoCurve.ExtendTypes.CYCLIC  # or - EXTRAP, CYCLIC_EXTRAP, CONST,
+
+
+def importRoute(node, ancestry):
+    '''
+    Animation route only at the moment
+    '''
+
+    if not hasattr(node, 'fields'):
+        return
+
+    routeIpoDict = node.getRouteIpoDict()
+
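+    # one action per animated target id, created lazily and cached in routeIpoDict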
+    def getIpo(id):
+        try:
+            action = routeIpoDict[id]
+        except KeyError:
+            action = routeIpoDict[id] = bpy.data.actions.new('web3d_ipo')
+        return action
+
+    # for getting definitions
+    defDict = node.getDefDict()
+    '''
+    Handles routing nodes to each other
+
+ROUTE vpPI.value_changed TO champFly001.set_position
+ROUTE vpOI.value_changed TO champFly001.set_orientation
+ROUTE vpTs.fraction_changed TO vpPI.set_fraction
+ROUTE vpTs.fraction_changed TO vpOI.set_fraction
+ROUTE champFly001.bindTime TO vpTs.set_startTime
+    '''
+
+    #from_id, from_type = node.id[1].split('.')
+    #to_id, to_type = node.id[3].split('.')
+
+    #value_changed
+    set_position_node = None
+    set_orientation_node = None
+    time_node = None
+
+    for field in node.fields:
+        if field and field[0] == 'ROUTE':
+            try:
+                from_id, from_type = field[1].split('.')
+                to_id, to_type = field[3].split('.')
+            except (ValueError, IndexError):
+                print("Warning, invalid ROUTE", field)
+                continue
+
+            if from_type == 'value_changed':
+                if to_type == 'set_position':
+                    action = getIpo(to_id)
+                    set_data_from_node = defDict[from_id]
+                    translatePositionInterpolator(set_data_from_node, action, ancestry)
+
+                if to_type in ('set_orientation', 'rotation'):
+                    action = getIpo(to_id)
+                    set_data_from_node = defDict[from_id]
+                    translateOrientationInterpolator(set_data_from_node, action, ancestry)
+
+                if to_type == 'set_scale':
+                    action = getIpo(to_id)
+                    set_data_from_node = defDict[from_id]
+                    translateScalarInterpolator(set_data_from_node, action, ancestry)
+
+            elif from_type == 'bindTime':
+                action = getIpo(from_id)
+                time_node = defDict[to_id]
+                translateTimeSensor(time_node, action, ancestry)
+
+
+def load_web3d(path, PREF_FLAT=False, PREF_CIRCLE_DIV=16, HELPER_FUNC=None):
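+    '''Import a VRML/X3D file in several passes: parse the node tree,
+    create datablocks for each Shape/lamp/camera/Transform, translate
+    ROUTE animation paths into actions, then rebuild the parent hierarchy
+    (skipped when PREF_FLAT is set).
+    '''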
+
+    # Used when adding blender primitives
+    GLOBALS['CIRCLE_DETAIL'] = PREF_CIRCLE_DIV
+
+    #root_node = vrml_parse('/_Cylinder.wrl')
+    if path.lower().endswith('.x3d'):
+        root_node, msg = x3d_parse(path)
+    else:
+        root_node, msg = vrml_parse(path)
+
+    if not root_node:
+        print(msg)
+        return
+
+    # fill with tuples - (node, [parents-parent, parent])
+    all_nodes = root_node.getSerialized([], [])
+
+    for node, ancestry in all_nodes:
+        #if 'castle.wrl' not in node.getFilename():
+        #   continue
+
+        spec = node.getSpec()
+        '''
+        prefix = node.getPrefix()
+        if prefix=='PROTO':
+            pass
+        else
+        '''
+        if HELPER_FUNC and HELPER_FUNC(node, ancestry):
+            # Note, include this function so the VRML/X3D importer can be extended
+            # by an external script - it gets first pick, so skip the default handlers
+            continue
+        if spec == 'Shape':
+            importShape(node, ancestry)
+        elif spec in ('PointLight', 'DirectionalLight', 'SpotLight'):
+            importLamp(node, spec, ancestry)
+        elif spec == 'Viewpoint':
+            importViewpoint(node, ancestry)
+        elif spec == 'Transform':
+            # Only use transform nodes when we are not importing a flat object hierarchy
+            if not PREF_FLAT:
+                importTransform(node, ancestry)
+            '''
+        # These are dealt with later within importRoute
+        elif spec=='PositionInterpolator':
+            action = bpy.data.ipos.new('web3d_ipo', 'Object')
+            translatePositionInterpolator(node, action)
+            '''
+
+    # After we import all nodes, route events - anim paths
+    for node, ancestry in all_nodes:
+        importRoute(node, ancestry)
+
+    for node, ancestry in all_nodes:
+        if node.isRoot():
+            # we know that all nodes referenced from ROUTEs will be in
+            # routeIpoDict, so there is no need to run node.getDefDict() for every node.
+            routeIpoDict = node.getRouteIpoDict()
+            defDict = node.getDefDict()
+
+            for key, action in routeIpoDict.items():
+
+                # Assign anim curves
+                target_node = defDict[key]  # don't shadow the outer 'node' loop variable
+                if target_node.blendObject is None:  # Add an object if we need one for animation
+                    target_node.blendObject = bpy.data.objects.new('AnimOb', None)  # , name)
+                    bpy.context.scene.objects.link(target_node.blendObject)
+
+                if target_node.blendObject.animation_data is None:
+                    target_node.blendObject.animation_data_create()
+
+                target_node.blendObject.animation_data.action = action
+
+    # Add in hierarchy
+    if not PREF_FLAT:
+        child_dict = {}
+        for node, ancestry in all_nodes:
+            if node.blendObject:
+                blendObject = None
+
+                # Get the last parent
+                i = len(ancestry)
+                while i:
+                    i -= 1
+                    blendObject = ancestry[i].blendObject
+                    if blendObject:
+                        break
+
+                if blendObject:
+                    # Parent (slow) - one liner but works
+                    # blendObject.makeParent([node.blendObject], 0, 1)
+
+                    # Parent (fast)
+                    child_dict.setdefault(blendObject, []).append(node.blendObject)
+
+        # Parent
+        for parent, children in child_dict.items():
+            for c in children:
+                c.parent = parent
+
+        # update deps
+        bpy.context.scene.update()
+        del child_dict
+
+
+def load(operator, context, filepath=""):
+
+    load_web3d(filepath,
+               PREF_FLAT=True,
+               PREF_CIRCLE_DIV=16,
+               )
+
+    return {'FINISHED'}
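+
+# Example usage, a minimal sketch assuming this module is importable as
+# 'import_web3d' (the module name here is hypothetical; normally the import
+# operator calls load() with the operator and context):
+#
+#   import import_web3d
+#   import_web3d.load_web3d("/path/to/model.wrl", PREF_FLAT=True, PREF_CIRCLE_DIV=16)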