diff --git a/io_import_scene_lwo.py b/io_import_scene_lwo.py index 5c7bef7a3aecd639a67d18fc8e3b910a35648237..69fa27300d88b8cfe9a274ddbbab8b710db27d1a 100644 --- a/io_import_scene_lwo.py +++ b/io_import_scene_lwo.py @@ -16,8 +16,6 @@ # # ##### END GPL LICENSE BLOCK ##### -# <pep8 compliant> - bl_info= { "name": "Import LightWave Objects", "author": "Ken Nign (Ken9)", diff --git a/io_import_scene_mhx.py b/io_import_scene_mhx.py index 911d53b9c1477043df3f17958004fc44eb4efd0c..59d24494127216dda6496ce143e44d010cd1d47b 100644 --- a/io_import_scene_mhx.py +++ b/io_import_scene_mhx.py @@ -16,8 +16,6 @@ # # ##### END GPL LICENSE BLOCK ##### -# <pep8 compliant> - # Project Name: MakeHuman # Product Home Page: http://www.makehuman.org/ # Code Home Page: http://code.google.com/p/makehuman/ diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 5926506d12405904987dc984ac7a6046ad75f5bc..162e30de82f81bae895966e88e34e0974a637345 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -32,58 +32,58 @@ from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. #Some of the chunks that we will export #----- Primary Chunk, at the beginning of each file -PRIMARY= 0x4D4D +PRIMARY = 0x4D4D #------ Main Chunks -OBJECTINFO = 0x3D3D #This gives the version of the mesh and is found right before the material and object information -VERSION = 0x0002 #This gives the version of the .3ds file -KFDATA = 0xB000 #This is the header for all of the key frame info +OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information +VERSION = 0x0002 # This gives the version of the .3ds file +KFDATA = 0xB000 # This is the header for all of the key frame info #------ sub defines of OBJECTINFO -MATERIAL=45055 #0xAFFF // This stored the texture info -OBJECT=16384 #0x4000 // This stores the faces, vertices, etc... +MATERIAL = 45055 # 0xAFFF // This stores the texture info +OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... #>------ sub defines of MATERIAL -MATNAME = 0xA000 # This holds the material name -MATAMBIENT = 0xA010 # Ambient color of the object/material -MATDIFFUSE = 0xA020 # This holds the color of the object/material -MATSPECULAR = 0xA030 # SPecular color of the object/material -MATSHINESS = 0xA040 # ?? -MATMAP = 0xA200 # This is a header for a new material -MATMAPFILE = 0xA300 # This holds the file name of the texture +MATNAME = 0xA000 # This holds the material name +MATAMBIENT = 0xA010 # Ambient color of the object/material +MATDIFFUSE = 0xA020 # This holds the color of the object/material +MATSPECULAR = 0xA030 # Specular color of the object/material +MATSHINESS = 0xA040 # ??
+MATMAP = 0xA200 # This is a header for a new texture map +MATMAPFILE = 0xA300 # This holds the file name of the texture -RGB1= 0x0011 -RGB2= 0x0012 +RGB1 = 0x0011 +RGB2 = 0x0012 #>------ sub defines of OBJECT -OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object -OBJECT_LIGHT = 0x4600 # This lets un know we are reading a light object -OBJECT_CAMERA= 0x4700 # This lets un know we are reading a camera object +OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object +OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object +OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object #>------ sub defines of CAMERA -OBJECT_CAM_RANGES= 0x4720 # The camera range values +OBJECT_CAM_RANGES = 0x4720 # The camera range values #>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = 0x4110 # The objects vertices -OBJECT_FACES = 0x4120 # The objects faces -OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color -OBJECT_UV = 0x4140 # The UV texture coordinates -OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix +OBJECT_VERTICES = 0x4110 # The objects vertices +OBJECT_FACES = 0x4120 # The objects faces +OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color +OBJECT_UV = 0x4140 # The UV texture coordinates +OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix #>------ sub defines of KFDATA -KFDATA_KFHDR = 0xB00A -KFDATA_KFSEG = 0xB008 -KFDATA_KFCURTIME = 0xB009 -KFDATA_OBJECT_NODE_TAG = 0xB002 +KFDATA_KFHDR = 0xB00A +KFDATA_KFSEG = 0xB008 +KFDATA_KFCURTIME = 0xB009 +KFDATA_OBJECT_NODE_TAG = 0xB002 #>------ sub defines of OBJECT_NODE_TAG -OBJECT_NODE_ID = 0xB030 -OBJECT_NODE_HDR = 0xB010 -OBJECT_PIVOT = 0xB013 -OBJECT_INSTANCE_NAME = 0xB011 -POS_TRACK_TAG = 0xB020 -ROT_TRACK_TAG = 0xB021 -SCL_TRACK_TAG = 0xB022 +OBJECT_NODE_ID = 0xB030 +OBJECT_NODE_HDR = 0xB010 +OBJECT_PIVOT = 0xB013 +OBJECT_INSTANCE_NAME = 0xB011 +POS_TRACK_TAG = 0xB020 +ROT_TRACK_TAG = 0xB021 +SCL_TRACK_TAG = 0xB022 import struct @@ -91,6 +91,8 @@ import struct # this is verry annoying for filenames! name_unique = [] # stores str, ascii only name_mapping = {} # stores {orig: byte} mapping + + def sane_name(name): name_fixed = name_mapping.get(name) if name_fixed is not None: @@ -102,62 +104,69 @@ def sane_name(name): while new_name in name_unique: new_name = new_name_clean + ".%.3d" % i - i+=1 + i += 1 # note, appending the 'str' version. name_unique.append(new_name) name_mapping[name] = new_name = new_name.encode("ASCII", "replace") return new_name + def uv_key(uv): return round(uv[0], 6), round(uv[1], 6) # size defines: SZ_SHORT = 2 -SZ_INT = 4 +SZ_INT = 4 SZ_FLOAT = 4 + class _3ds_short(object): '''Class representing a short (2-byte integer) for a 3ds file.
*** This looks like an unsigned short H is unsigned from the struct docs - Cam***''' - __slots__ = ('value', ) + __slots__ = ("value", ) + def __init__(self, val=0): self.value = val def get_size(self): return SZ_SHORT - def write(self,file): + def write(self, file): file.write(struct.pack("<H", self.value)) def __str__(self): return str(self.value) + class _3ds_int(object): '''Class representing an int (4-byte integer) for a 3ds file.''' - __slots__ = ('value', ) + __slots__ = ("value", ) + def __init__(self, val): self.value = val def get_size(self): return SZ_INT - def write(self,file): + def write(self, file): file.write(struct.pack("<I", self.value)) def __str__(self): return str(self.value) + class _3ds_float(object): '''Class representing a 4-byte IEEE floating point number for a 3ds file.''' - __slots__ = ('value', ) + __slots__ = ("value", ) + def __init__(self, val): - self.value=val + self.value = val def get_size(self): return SZ_FLOAT - def write(self,file): + def write(self, file): file.write(struct.pack("<f", self.value)) def __str__(self): @@ -166,31 +175,34 @@ class _3ds_float(object): class _3ds_string(object): '''Class representing a zero-terminated string for a 3ds file.''' - __slots__ = ('value', ) + __slots__ = ("value", ) + def __init__(self, val): assert(type(val) == bytes) self.value = val def get_size(self): - return (len(self.value)+1) + return (len(self.value) + 1) - def write(self,file): - binary_format = "<%ds" % (len(self.value)+1) + def write(self, file): + binary_format = "<%ds" % (len(self.value) + 1) file.write(struct.pack(binary_format, self.value)) def __str__(self): return self.value + class _3ds_point_3d(object): '''Class representing a three-dimensional point for a 3ds file.''' - __slots__ = 'x','y','z' + __slots__ = "x", "y", "z" + def __init__(self, point): self.x, self.y, self.z = point def get_size(self): - return 3*SZ_FLOAT + return 3 * SZ_FLOAT - def write(self,file): + def write(self, file): file.write(struct.pack('<3f', self.x, self.y, self.z)) def __str__(self): @@ -200,7 +212,7 @@ class _3ds_point_3d(object): """ class _3ds_point_4d(object): '''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.''' - __slots__ = 'x','y','z','w' + __slots__ = "x","y","z","w" def __init__(self, point=(0.0,0.0,0.0,0.0)): self.x, self.y, self.z, self.w = point @@ -215,76 +227,84 @@ class _3ds_point_4d(object): return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w) """ + class _3ds_point_uv(object): '''Class representing a UV-coordinate for a 3ds file.''' - __slots__ = ('uv', ) + __slots__ = ("uv", ) + def __init__(self, point): self.uv = point def __cmp__(self, other): - return cmp(self.uv,other.uv) + return cmp(self.uv, other.uv) def get_size(self): - return 2*SZ_FLOAT + return 2 * SZ_FLOAT - def write(self,file): - data=struct.pack('<2f', self.uv[0], self.uv[1]) + def write(self, file): + data = struct.pack('<2f', self.uv[0], self.uv[1]) file.write(data) def __str__(self): return '(%g, %g)' % self.uv + class _3ds_rgb_color(object): '''Class representing a (24-bit) rgb color for a 3ds file.''' - __slots__ = 'r','g','b' + __slots__ = "r", "g", "b" + def __init__(self, col): self.r, self.g, self.b = col def get_size(self): return 3 - def write(self,file): - file.write( struct.pack('<3B', int(255*self.r), int(255*self.g), int(255*self.b) ) ) -# file.write( struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) ) + def write(self, file): + file.write(struct.pack('<3B', int(255 * self.r), 
int(255 * self.g), int(255 * self.b))) +# file.write(struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) ) def __str__(self): return '{%f, %f, %f}' % (self.r, self.g, self.b) + class _3ds_face(object): '''Class representing a face for a 3ds file.''' - __slots__ = ('vindex', ) + __slots__ = ("vindex", ) + def __init__(self, vindex): self.vindex = vindex def get_size(self): - return 4*SZ_SHORT + return 4 * SZ_SHORT - def write(self,file): + def write(self, file): # The last zero is only used by 3d studio - file.write(struct.pack("<4H", self.vindex[0],self.vindex[1], self.vindex[2], 0)) + file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], 0)) def __str__(self): - return '[%d %d %d]' % (self.vindex[0],self.vindex[1], self.vindex[2]) + return "[%d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2]) + class _3ds_array(object): '''Class representing an array of variables for a 3ds file. Consists of a _3ds_short to indicate the number of items, followed by the items themselves. ''' - __slots__ = 'values', 'size' + __slots__ = "values", "size" + def __init__(self): - self.values=[] - self.size=SZ_SHORT + self.values = [] + self.size = SZ_SHORT # add an item: - def add(self,item): + def add(self, item): self.values.append(item) - self.size+=item.get_size() + self.size += item.get_size() def get_size(self): return self.size - def write(self,file): + def write(self, file): _3ds_short(len(self.values)).write(file) #_3ds_int(len(self.values)).write(file) for value in self.values: @@ -295,13 +315,15 @@ class _3ds_array(object): def __str__(self): return '(%d items)' % len(self.values) + class _3ds_named_variable(object): '''Convenience class for named variables.''' - __slots__ = 'value', 'name' + __slots__ = "value", "name" + def __init__(self, name, val=None): - self.name=name - self.value=val + self.name = name + self.value = val def get_size(self): if self.value is None: @@ -313,12 +335,12 @@ class _3ds_named_variable(object): if self.value is not None: self.value.write(file) - def dump(self,indent): + def dump(self, indent): if self.value is not None: - spaces="" + spaces = "" for i in range(indent): spaces += " " - if (self.name!=""): + if (self.name != ""): print(spaces, self.name, " = ", self.value) else: print(spaces, "[unnamed]", " = ", self.value) @@ -330,21 +352,22 @@ class _3ds_chunk(object): Chunks contain zero or more variables, followed by zero or more subchunks. ''' - __slots__ = 'ID', 'size', 'variables', 'subchunks' + __slots__ = "ID", "size", "variables", "subchunks" + def __init__(self, id=0): - self.ID=_3ds_short(id) - self.size=_3ds_int(0) - self.variables=[] - self.subchunks=[] + self.ID = _3ds_short(id) + self.size = _3ds_int(0) + self.variables = [] + self.subchunks = [] def set_ID(id): - self.ID=_3ds_short(id) + self.ID = _3ds_short(id) def add_variable(self, name, var): '''Add a named variable. The name is mostly for debugging purposes.''' - self.variables.append(_3ds_named_variable(name,var)) + self.variables.append(_3ds_named_variable(name, var)) def add_subchunk(self, chunk): '''Add a subchunk.''' @@ -354,12 +377,12 @@ class _3ds_chunk(object): '''Calculate the size of the chunk and return it. 
The sizes of the variables and subchunks are used to determine this chunk\'s size.''' - tmpsize=self.ID.get_size()+self.size.get_size() + tmpsize = self.ID.get_size() + self.size.get_size() for variable in self.variables: - tmpsize+=variable.get_size() + tmpsize += variable.get_size() for subchunk in self.subchunks: - tmpsize+=subchunk.get_size() - self.size.value=tmpsize + tmpsize += subchunk.get_size() + self.size.value = tmpsize return self.size.value def write(self, file): @@ -374,21 +397,19 @@ class _3ds_chunk(object): for subchunk in self.subchunks: subchunk.write(file) - def dump(self, indent=0): '''Write the chunk to a file. Dump is used for debugging purposes, to dump the contents of a chunk to the standard output. Uses the dump function of the named variables and the subchunks to do the actual work.''' - spaces="" + spaces = "" for i in range(indent): spaces += " " print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size()) for variable in self.variables: - variable.dump(indent+1) + variable.dump(indent + 1) for subchunk in self.subchunks: - subchunk.dump(indent+1) - + subchunk.dump(indent + 1) ###################################################### @@ -443,51 +464,56 @@ def make_material_texture_chunk(id, images): return mat_sub + def make_material_chunk(material, image): '''Make a material chunk out of a blender material.''' material_chunk = _3ds_chunk(MATERIAL) name = _3ds_chunk(MATNAME) - if material: name_str = material.name - else: name_str = 'None' - if image: name_str += image.name + name_str = material.name if material else "None" + + if image: + name_str += image.name name.add_variable("name", _3ds_string(sane_name(name_str))) material_chunk.add_subchunk(name) if not material: - material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0,0,0) )) - material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8) )) - material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1,1,1) )) + material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0, 0, 0))) + material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8))) + material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1, 1, 1))) else: - material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.ambient for a in material.diffuse_color] )) + material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a * material.ambient for a in material.diffuse_color])) # material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.amb for a in material.rgbCol] )) material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color)) # material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.rgbCol)) material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color)) # material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specCol)) - images = get_material_images(material) # can be None - if image: images.append(image) + images = get_material_images(material) # can be None + if image: + images.append(image) if images: material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images)) return material_chunk + class tri_wrapper(object): '''Class representing a triangle. 
Used when converting faces to triangles''' - __slots__ = 'vertex_index', 'mat', 'image', 'faceuvs', 'offset' - def __init__(self, vindex=(0,0,0), mat=None, image=None, faceuvs=None): - self.vertex_index= vindex - self.mat= mat - self.image= image - self.faceuvs= faceuvs - self.offset= [0, 0, 0] # offset indices + __slots__ = "vertex_index", "mat", "image", "faceuvs", "offset" + + def __init__(self, vindex=(0, 0, 0), mat=None, image=None, faceuvs=None): + self.vertex_index = vindex + self.mat = mat + self.image = image + self.faceuvs = faceuvs + self.offset = [0, 0, 0] # offset indices def extract_triangles(mesh): @@ -513,28 +539,29 @@ def extract_triangles(mesh): # f_uv = (uf.uv1, uf.uv2, uf.uv3, uf.uv4) if face.vertices[3] else (uf.uv1, uf.uv2, uf.uv3) # f_uv = face.uv img = uf.image if uf else None -# img = face.image - if img: img = img.name + if img is not None: + img = img.name # if f_v[3] == 0: - if len(f_v)==3: + if len(f_v) == 3: new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img) # new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img) - if (do_uv): new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2]) + if (do_uv): + new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2]) tri_list.append(new_tri) - else: #it's a quad + else: # it's a quad new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img) # new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img) new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img) # new_tri_2 = tri_wrapper((f_v[0].index, f_v[2].index, f_v[3].index), face.mat, img) if (do_uv): - new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2]) - new_tri_2.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3]) + new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2]) + new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3]) - tri_list.append( new_tri ) - tri_list.append( new_tri_2 ) + tri_list.append(new_tri) + tri_list.append(new_tri_2) return tri_list @@ -548,7 +575,7 @@ def remove_face_uv(verts, tri_list): # initialize a list of UniqueLists, one per vertex: #uv_list = [UniqueList() for i in xrange(len(verts))] - unique_uvs= [{} for i in range(len(verts))] + unique_uvs = [{} for i in range(len(verts))] # for each face uv coordinate, add it to the UniqueList of the vertex for tri in tri_list: @@ -556,8 +583,8 @@ def remove_face_uv(verts, tri_list): # store the index into the UniqueList for future reference: # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i]))) - context_uv_vert= unique_uvs[tri.vertex_index[i]] - uvkey= tri.faceuvs[i] + context_uv_vert = unique_uvs[tri.vertex_index[i]] + uvkey = tri.faceuvs[i] offset_index__uv_3ds = context_uv_vert.get(uvkey) @@ -566,8 +593,6 @@ def remove_face_uv(verts, tri_list): tri.offset[i] = offset_index__uv_3ds[0] - - # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it # only once. 
@@ -577,10 +602,10 @@ def remove_face_uv(verts, tri_list): vert_array = _3ds_array() uv_array = _3ds_array() index_list = [] - for i,vert in enumerate(verts): + for i, vert in enumerate(verts): index_list.append(vert_index) - pt = _3ds_point_3d(vert.co) # reuse, should be ok + pt = _3ds_point_3d(vert.co) # reuse, should be ok uvmap = [None] * len(unique_uvs[i]) for ii, uv_3ds in unique_uvs[i].values(): # add a vertex duplicate to the vertex_array for every uv associated with this vertex: @@ -601,11 +626,12 @@ def remove_face_uv(verts, tri_list): # Make sure the triangle vertex indices now refer to the new vertex list: for tri in tri_list: for i in range(3): - tri.offset[i]+=index_list[tri.vertex_index[i]] - tri.vertex_index= tri.offset + tri.offset[i] += index_list[tri.vertex_index[i]] + tri.vertex_index = tri.offset return vert_array, uv_array, tri_list + def make_faces_chunk(tri_list, mesh, materialDict): '''Make a chunk for the faces. @@ -618,79 +644,79 @@ def make_faces_chunk(tri_list, mesh, materialDict): face_chunk = _3ds_chunk(OBJECT_FACES) face_list = _3ds_array() - - if len(mesh.uv_textures): -# if mesh.faceUV: + if mesh.uv_textures: # Gather materials used in this mesh - mat/image pairs unique_mats = {} - for i,tri in enumerate(tri_list): + for i, tri in enumerate(tri_list): face_list.add(_3ds_face(tri.vertex_index)) if materials: mat = materials[tri.mat] - if mat: mat = mat.name + if mat: + mat = mat.name img = tri.image try: context_mat_face_array = unique_mats[mat, img][1] except: - - if mat: name_str = mat - else: name_str = 'None' - if img: name_str += img + name_str = mat if mat else "None" + if img: + name_str += img context_mat_face_array = _3ds_array() unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array - context_mat_face_array.add(_3ds_short(i)) # obj_material_faces[tri.mat].add(_3ds_short(i)) face_chunk.add_variable("faces", face_list) for mat_name, mat_faces in unique_mats.values(): - obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL) + obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL) obj_material_chunk.add_variable("name", mat_name) obj_material_chunk.add_variable("face_list", mat_faces) face_chunk.add_subchunk(obj_material_chunk) else: - obj_material_faces=[] - obj_material_names=[] + obj_material_faces = [] + obj_material_names = [] for m in materials: if m: obj_material_names.append(_3ds_string(sane_name(m.name))) obj_material_faces.append(_3ds_array()) n_materials = len(obj_material_names) - for i,tri in enumerate(tri_list): + for i, tri in enumerate(tri_list): face_list.add(_3ds_face(tri.vertex_index)) if (tri.mat < n_materials): obj_material_faces[tri.mat].add(_3ds_short(i)) face_chunk.add_variable("faces", face_list) for i in range(n_materials): - obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL) + obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL) obj_material_chunk.add_variable("name", obj_material_names[i]) obj_material_chunk.add_variable("face_list", obj_material_faces[i]) face_chunk.add_subchunk(obj_material_chunk) return face_chunk + def make_vert_chunk(vert_array): '''Make a vertex chunk out of an array of vertices.''' vert_chunk = _3ds_chunk(OBJECT_VERTICES) - vert_chunk.add_variable("vertices",vert_array) + vert_chunk.add_variable("vertices", vert_array) return vert_chunk + def make_uv_chunk(uv_array): '''Make a UV chunk out of an array of UVs.''' uv_chunk = _3ds_chunk(OBJECT_UV) uv_chunk.add_variable("uv coords", uv_array) return uv_chunk + def make_mesh_chunk(mesh, materialDict): '''Make a chunk out of a Blender mesh.''' @@ 
-906,7 +932,6 @@ def save(operator, context, filepath="", mesh_objects = [] scene = context.scene - if use_selection: objects = (ob for ob in scene.objects if ob.is_visible(scene) and ob.select) else: @@ -950,22 +975,18 @@ def save(operator, context, filepath="", if mat_index >= mat_ls_len: mat_index = f.mat = 0 mat = mat_ls[mat_index] - if mat: mat_name = mat.name - else: mat_name = None + mat_name = None if mat is None else mat.name # else there already set to none img = uf.image -# img = f.image - if img: img_name = img.name - else: img_name = None - - materialDict.setdefault((mat_name, img_name), (mat, img) ) + img_name = None if img is None else img.name + materialDict.setdefault((mat_name, img_name), (mat, img)) else: for mat in mat_ls: - if mat: # material may be None so check its not. - materialDict.setdefault((mat.name, None), (mat, None) ) + if mat: # material may be None so check its not. + materialDict.setdefault((mat.name, None), (mat, None)) # Why 0 Why! for f in data.faces: @@ -977,7 +998,6 @@ def save(operator, context, filepath="", if free: free_derived_objects(ob) - # Make material chunks for all materials used in the meshes: for mat_and_image in materialDict.values(): object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1])) @@ -1012,7 +1032,7 @@ def save(operator, context, filepath="", bpy.data.meshes.remove(blender_mesh) # blender_mesh.vertices = None - i+=i + i += i # Create chunks for all empties: ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 34dc370d57973c1efc7a0a84f81bc1e777fa561f..856698a7d69904dbf405f56d6c04e6fd2f9ecb21 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -39,43 +39,43 @@ BOUNDS_3DS = [] #Some of the chunks that we will see #----- Primary Chunk, at the beginning of each file -PRIMARY = int('0x4D4D',16) +PRIMARY = 0x4D4D #------ Main Chunks -OBJECTINFO = 0x3D3D #This gives the version of the mesh and is found right before the material and object information -VERSION = 0x0002 #This gives the version of the .3ds file -EDITKEYFRAME= 0xB000 #This is the header for all of the key frame info +OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information +VERSION = 0x0002 # This gives the version of the .3ds file +EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info #------ sub defines of OBJECTINFO -MATERIAL = 45055 #0xAFFF // This stored the texture info -OBJECT = 16384 #0x4000 // This stores the faces, vertices, etc... +MATERIAL = 0xAFFF # This stored the texture info +OBJECT = 0x4000 # This stores the faces, vertices, etc... #>------ sub defines of MATERIAL #------ sub defines of MATERIAL_BLOCK -MAT_NAME = 0xA000 # This holds the material name -MAT_AMBIENT = 0xA010 # Ambient color of the object/material -MAT_DIFFUSE = 0xA020 # This holds the color of the object/material -MAT_SPECULAR = 0xA030 # SPecular color of the object/material -MAT_SHINESS = 0xA040 # ?? 
-MAT_TRANSPARENCY= 0xA050 # Transparency value of material -MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material -MAT_WIRE = 0xA085 # Only render's wireframe - -MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map -MAT_SPECULAR_MAP= 0xA204 # This is a header for a new specular map -MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map -MAT_REFLECTION_MAP= 0xA220 # This is a header for a new reflection map -MAT_BUMP_MAP = 0xA230 # This is a header for a new bump map -MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture - -MAT_FLOAT_COLOR = 0x0010 #color defined as 3 floats -MAT_24BIT_COLOR = 0x0011 #color defined as 3 bytes +MAT_NAME = 0xA000 # This holds the material name +MAT_AMBIENT = 0xA010 # Ambient color of the object/material +MAT_DIFFUSE = 0xA020 # This holds the color of the object/material +MAT_SPECULAR = 0xA030 # Specular color of the object/material +MAT_SHINESS = 0xA040 # ?? +MAT_TRANSPARENCY = 0xA050 # Transparency value of material +MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material +MAT_WIRE = 0xA085 # Only renders wireframe + +MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map +MAT_SPECULAR_MAP = 0xA204 # This is a header for a new specular map +MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map +MAT_REFLECTION_MAP = 0xA220 # This is a header for a new reflection map +MAT_BUMP_MAP = 0xA230 # This is a header for a new bump map +MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture + +MAT_FLOAT_COLOR = 0x0010 # color defined as 3 floats +MAT_24BIT_COLOR = 0x0011 # color defined as 3 bytes #>------ sub defines of OBJECT -OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object -OBJECT_LAMP = 0x4600 # This lets un know we are reading a light object -OBJECT_LAMP_SPOT = 0x4610 # The light is a spotloght. -OBJECT_LAMP_OFF = 0x4620 # The light off. +OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object +OBJECT_LAMP = 0x4600 # This lets us know we are reading a light object +OBJECT_LAMP_SPOT = 0x4610 # The light is a spotlight. +OBJECT_LAMP_OFF = 0x4620 # The light is off.
OBJECT_LAMP_ATTENUATE = 0x4625 OBJECT_LAMP_RAYSHADE = 0x4627 OBJECT_LAMP_SHADOWED = 0x4630 @@ -95,51 +95,49 @@ OBJECT_LAMP_OUTER_RANGE = 0x465A OBJECT_LAMP_MULTIPLIER = 0x465B OBJECT_LAMP_AMBIENT_LIGHT = 0x4680 - - -OBJECT_CAMERA= 0x4700 # This lets un know we are reading a camera object +OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object #>------ sub defines of CAMERA -OBJECT_CAM_RANGES= 0x4720 # The camera range values +OBJECT_CAM_RANGES = 0x4720 # The camera range values #>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = 0x4110 # The objects vertices -OBJECT_FACES = 0x4120 # The objects faces -OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color -OBJECT_UV = 0x4140 # The UV texture coordinates -OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix +OBJECT_VERTICES = 0x4110 # The objects vertices +OBJECT_FACES = 0x4120 # The objects faces +OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color +OBJECT_UV = 0x4140 # The UV texture coordinates +OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix #>------ sub defines of EDITKEYFRAME -ED_KEY_AMBIENT_NODE = 0xB001 -ED_KEY_OBJECT_NODE = 0xB002 -ED_KEY_CAMERA_NODE = 0xB003 -ED_KEY_TARGET_NODE = 0xB004 -ED_KEY_LIGHT_NODE = 0xB005 -ED_KEY_L_TARGET_NODE = 0xB006 -ED_KEY_SPOTLIGHT_NODE = 0xB007 +ED_KEY_AMBIENT_NODE = 0xB001 +ED_KEY_OBJECT_NODE = 0xB002 +ED_KEY_CAMERA_NODE = 0xB003 +ED_KEY_TARGET_NODE = 0xB004 +ED_KEY_LIGHT_NODE = 0xB005 +ED_KEY_L_TARGET_NODE = 0xB006 +ED_KEY_SPOTLIGHT_NODE = 0xB007 #>------ sub defines of ED_KEY_OBJECT_NODE -# EK_OB_KEYFRAME_SEG = 0xB008 -# EK_OB_KEYFRAME_CURTIME = 0xB009 -# EK_OB_KEYFRAME_HEADER = 0xB00A -EK_OB_NODE_HEADER = 0xB010 -EK_OB_INSTANCE_NAME = 0xB011 -# EK_OB_PRESCALE = 0xB012 -EK_OB_PIVOT = 0xB013 -# EK_OB_BOUNDBOX = 0xB014 -# EK_OB_MORPH_SMOOTH = 0xB015 -EK_OB_POSITION_TRACK = 0xB020 -EK_OB_ROTATION_TRACK = 0xB021 -EK_OB_SCALE_TRACK = 0xB022 -# EK_OB_CAMERA_FOV_TRACK = 0xB023 -# EK_OB_CAMERA_ROLL_TRACK = 0xB024 -# EK_OB_COLOR_TRACK = 0xB025 -# EK_OB_MORPH_TRACK = 0xB026 -# EK_OB_HOTSPOT_TRACK = 0xB027 -# EK_OB_FALLOF_TRACK = 0xB028 -# EK_OB_HIDE_TRACK = 0xB029 -# EK_OB_NODE_ID = 0xB030 - -ROOT_OBJECT = 0xFFFF +# EK_OB_KEYFRAME_SEG = 0xB008 +# EK_OB_KEYFRAME_CURTIME = 0xB009 +# EK_OB_KEYFRAME_HEADER = 0xB00A +EK_OB_NODE_HEADER = 0xB010 +EK_OB_INSTANCE_NAME = 0xB011 +# EK_OB_PRESCALE = 0xB012 +EK_OB_PIVOT = 0xB013 +# EK_OB_BOUNDBOX = 0xB014 +# EK_OB_MORPH_SMOOTH = 0xB015 +EK_OB_POSITION_TRACK = 0xB020 +EK_OB_ROTATION_TRACK = 0xB021 +EK_OB_SCALE_TRACK = 0xB022 +# EK_OB_CAMERA_FOV_TRACK = 0xB023 +# EK_OB_CAMERA_ROLL_TRACK = 0xB024 +# EK_OB_COLOR_TRACK = 0xB025 +# EK_OB_MORPH_TRACK = 0xB026 +# EK_OB_HOTSPOT_TRACK = 0xB027 +# EK_OB_FALLOF_TRACK = 0xB028 +# EK_OB_HIDE_TRACK = 0xB029 +# EK_OB_NODE_ID = 0xB030 + +ROOT_OBJECT = 0xFFFF global scn scn = None @@ -155,7 +153,7 @@ class chunk: bytes_read = 0 #we don't read in the bytes_read, we compute that - binary_format='<HI' + binary_format = "<HI" def __init__(self): self.ID = 0 @@ -168,6 +166,7 @@ class chunk: print('length: ', self.length) print('bytes_read: ', self.bytes_read) + def read_chunk(file, chunk): temp_data = file.read(struct.calcsize(chunk.binary_format)) data = struct.unpack(chunk.binary_format, temp_data) @@ -179,6 +178,7 @@ def read_chunk(file, chunk): #if debugging #chunk.dump() + def read_string(file): #read in the characters till we get a null character s = b'' @@ -196,6 +196,8 @@ def read_string(file): 
###################################################### # IMPORT ###################################################### + + def process_next_object_chunk(file, previous_chunk): new_chunk = chunk() temp_chunk = chunk() @@ -204,9 +206,10 @@ def process_next_object_chunk(file, previous_chunk): #read the next chunk read_chunk(file, new_chunk) + def skip_to_end(file, skip_chunk): buffer_size = skip_chunk.length - skip_chunk.bytes_read - binary_format='%ic' % buffer_size + binary_format = "%ic" % buffer_size temp_data = file.read(struct.calcsize(binary_format)) skip_chunk.bytes_read += buffer_size @@ -239,14 +242,14 @@ def add_texture_to_material(image, texture, material, mapto): def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): #print previous_chunk.bytes_read, 'BYTES READ' contextObName = None - contextLamp = [None, None] # object, Data + contextLamp = [None, None] # object, Data contextMaterial = None - contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity() + contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity() #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity() - contextMesh_vertls = None # flat array: (verts * 3) + contextMesh_vertls = None # flat array: (verts * 3) contextMesh_facels = None - contextMeshMaterials = {} # matname:[face_idxs] - contextMeshUV = None # flat array (verts * 2) + contextMeshMaterials = {} # matname:[face_idxs] + contextMeshUV = None # flat array (verts * 2) TEXTURE_DICT = {} MATDICT = {} @@ -264,10 +267,10 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff') # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT' # only init once - object_list = [] # for hierarchy - object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent - pivot_list = [] # pivots with hierarchy handling - + object_list = [] # for hierarchy + object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent + pivot_list = [] # pivots with hierarchy handling + def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials): bmesh = bpy.data.meshes.new(contextObName) @@ -276,15 +279,15 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if myContextMesh_vertls: - bmesh.vertices.add(len(myContextMesh_vertls)//3) + bmesh.vertices.add(len(myContextMesh_vertls) // 3) bmesh.faces.add(len(myContextMesh_facels)) bmesh.vertices.foreach_set("co", myContextMesh_vertls) - + eekadoodle_faces = [] for v1, v2, v3 in myContextMesh_facels: eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0]) bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces) - + if bmesh.faces and contextMeshUV: bmesh.uv_textures.new() uv_faces = bmesh.uv_textures.active.data[:] @@ -298,7 +301,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): bmat = MATDICT[matName][1] img = TEXTURE_DICT.get(bmat.name) - bmesh.materials.append(bmat) # can be None + bmesh.materials.append(bmat) # can be None if uv_faces and img: for fidx in faces: @@ -309,16 +312,16 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): else: for fidx in faces: bmesh.faces[fidx].material_index = mat_idx - + if uv_faces: for fidx, uf in enumerate(uv_faces): face = myContextMesh_facels[fidx] v1, v2, v3 = face - + # eekadoodle if v3 == 0: v1, v2, v3 = v3, v1, v2 - + uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2] uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2] uf.uv3 = 
contextMeshUV[v3 * 2:(v3 * 2) + 2] @@ -330,12 +333,12 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): ob = bpy.data.objects.new(contextObName, bmesh) object_dictionary[contextObName] = ob SCN.objects.link(ob) - + ''' if contextMatrix_tx: ob.setMatrix(contextMatrix_tx) ''' - + if contextMatrix_rot: ob.matrix_local = contextMatrix_rot object_matrix[ob] = contextMatrix_rot.copy() @@ -356,7 +359,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): def read_byte_color(temp_chunk): temp_data = file.read(struct.calcsize('3B')) temp_chunk.bytes_read += 3 - return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb + return [float(col) / 255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb def read_texture(new_chunk, temp_chunk, name, mapto): new_texture = bpy.data.textures.new(name, type='IMAGE') @@ -369,7 +372,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if (temp_chunk.ID == MAT_MAP_FILEPATH): texture_name, read_str_len = read_string(file) img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname) - new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed + new_chunk.bytes_read += read_str_len # plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -397,7 +400,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): #it's an unsigned short (H) temp_data = file.read(struct.calcsize('I')) version = struct.unpack('<I', temp_data)[0] - new_chunk.bytes_read += 4 #read the 4 bytes for the version number + new_chunk.bytes_read += 4 # read the 4 bytes for the version number #this loader works with version 3 and below, but may not with 4 and above if (version > 3): print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) @@ -416,10 +419,11 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if CreateBlenderObject: putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials) - contextMesh_vertls = []; contextMesh_facels = [] + contextMesh_vertls = [] + contextMesh_facels = [] ## preparando para receber o proximo objeto - contextMeshMaterials = {} # matname:[face_idxs] + contextMeshMaterials = {} # matname:[face_idxs] contextMeshUV = None #contextMesh.vertexUV = 1 # Make sticky coords. # Reset matrix @@ -447,8 +451,8 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): #plus one for the null character that ended the string new_chunk.bytes_read += read_str_len - contextMaterial.name = material_name.rstrip() # remove trailing whitespace - MATDICT[material_name]= (contextMaterial.name, contextMaterial) + contextMaterial.name = material_name.rstrip() # remove trailing whitespace + MATDICT[material_name] = (contextMaterial.name, contextMaterial) elif (new_chunk.ID == MAT_AMBIENT): #print 'elif (new_chunk.ID == MAT_AMBIENT):' @@ -522,15 +526,14 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) temp_chunk.bytes_read += 2 - contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100) + contextMaterial.alpha = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100) new_chunk.bytes_read += temp_chunk.bytes_read - - elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support. + elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support. 
temp_data = file.read(STRUCT_SIZE_3FLOAT) - x,y,z = struct.unpack('<3f', temp_data) + x, y, z = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT # no lamp in dict that would be confusing @@ -566,7 +569,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts)) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts # dummyvert is not used atm! - + #print 'object verts: bytes read: ', new_chunk.bytes_read elif (new_chunk.ID == OBJECT_FACES): @@ -578,24 +581,23 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): # print '\ngetting a face' temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces) - new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces #4 short ints x 2 bytes each + new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces # 4 short ints x 2 bytes each contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data) contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)] elif (new_chunk.ID == OBJECT_MATERIAL): # print 'elif (new_chunk.ID == OBJECT_MATERIAL):' material_name, read_str_len = read_string(file) - new_chunk.bytes_read += read_str_len # remove 1 null character. + new_chunk.bytes_read += read_str_len # remove 1 null character. temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) num_faces_using_mat = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT - temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat - contextMeshMaterials[material_name]= struct.unpack("<%dH" % (num_faces_using_mat), temp_data) + contextMeshMaterials[material_name] = struct.unpack("<%dH" % (num_faces_using_mat), temp_data) #look up the material in all the materials @@ -611,7 +613,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): elif (new_chunk.ID == OBJECT_TRANS_MATRIX): # How do we know the matrix size? 
54 == 4x4 48 == 4x3 temp_data = file.read(STRUCT_SIZE_4x3MAT) - data = list( struct.unpack('<ffffffffffff', temp_data) ) + data = list(struct.unpack('<ffffffffffff', temp_data)) new_chunk.bytes_read += STRUCT_SIZE_4x3MAT contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \ @@ -629,7 +631,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname) # img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH) - new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed + new_chunk.bytes_read += read_str_len # plus one for the null character that gets removed elif new_chunk.ID == EDITKEYFRAME: pass @@ -647,7 +649,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): object_name, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) - new_chunk.bytes_read += 4 + new_chunk.bytes_read += 4 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) hierarchy = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += 2 @@ -655,8 +657,8 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): child = object_dictionary.get(object_name) if child is None: - child = bpy.data.objects.new(object_name, None) # create an empty object - SCN.objects.link(child) + child = bpy.data.objects.new(object_name, None) # create an empty object + SCN.objects.link(child) object_list.append(child) object_parent.append(hierarchy) @@ -670,13 +672,13 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): new_chunk.bytes_read += read_str_len # print("new instance object:", object_name) - elif new_chunk.ID == EK_OB_PIVOT: # translation + elif new_chunk.ID == EK_OB_PIVOT: # translation temp_data = file.read(STRUCT_SIZE_3FLOAT) pivot = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT - pivot_list[len(pivot_list)-1] = mathutils.Vector(pivot) + pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot) - elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation + elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) @@ -695,7 +697,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if nframe == 0: child.location = loc - elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation + elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) @@ -709,12 +711,12 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 temp_data = file.read(STRUCT_SIZE_4FLOAT) - rad,axis_x,axis_y,axis_z = struct.unpack('<4f', temp_data) + rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data) new_chunk.bytes_read += STRUCT_SIZE_4FLOAT if nframe == 0: child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler() # why negative? 
- elif new_chunk.ID == EK_OB_SCALE_TRACK: # translation + elif new_chunk.ID == EK_OB_SCALE_TRACK: # translation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) @@ -733,15 +735,14 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if nframe == 0: child.scale = sca - else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL): + else: # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL): # print 'skipping to end of this chunk' #print("unknown chunk: "+hex(new_chunk.ID)) buffer_size = new_chunk.length - new_chunk.bytes_read - binary_format='%ic' % buffer_size + binary_format = "%ic" % buffer_size temp_data = file.read(struct.calcsize(binary_format)) new_chunk.bytes_read += buffer_size - #update the previous chunk bytes read # print 'previous_chunk.bytes_read += new_chunk.bytes_read' # print previous_chunk.bytes_read, new_chunk.bytes_read @@ -753,8 +754,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): if CreateBlenderObject: putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials) - - # Assign parents to objects + # Assign parents to objects for ind, ob in enumerate(object_list): parent = object_parent[ind] if parent == ROOT_OBJECT: @@ -764,7 +764,7 @@ def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): # pivot_list[ind] += pivot_list[parent] # XXX, not sure this is correct, should parent space matrix be applied before combining? # fix pivots for ind, ob in enumerate(object_list): - if ob.type == 'MESH': + if ob.type == 'MESH': pivot = pivot_list[ind] pivot_matrix = object_matrix.get(ob, mathutils.Matrix()) # unlikely to fail pivot_matrix = mathutils.Matrix.Translation(-pivot * pivot_matrix.to_3x3()) @@ -790,40 +790,15 @@ def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, #here we go! # print 'reading the first chunk' read_chunk(file, current_chunk) - if (current_chunk.ID!=PRIMARY): + if (current_chunk.ID != PRIMARY): print('\tFatal Error: Not a valid 3ds file: %r' % filepath) file.close() return - - # IMPORT_AS_INSTANCE = Blender.Draw.Create(0) -# IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0) -# IMAGE_SEARCH = Blender.Draw.Create(1) -# APPLY_MATRIX = Blender.Draw.Create(0) - - # Get USER Options -# pup_block = [\ -# ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reacehs the size constraint. 
Zero Disables.'),\ -# ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\ -# ('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\ -# #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\ -# ] - -# if PREF_UI: -# if not Blender.Draw.PupBlock('Import 3DS...', pup_block): -# return - -# Blender.Window.WaitCursor(1) - -# IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val -# # IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val -# IMAGE_SEARCH = IMAGE_SEARCH.val -# APPLY_MATRIX = APPLY_MATRIX.val - if IMPORT_CONSTRAIN_BOUNDS: - BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30] + BOUNDS_3DS[:] = [1 << 30, 1 << 30, 1 << 30, -1 << 30, -1 << 30, -1 << 30] else: - BOUNDS_3DS[:]= [] + BOUNDS_3DS[:] = [] ##IMAGE_SEARCH @@ -837,7 +812,7 @@ def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, # SCN_OBJECTS = scn.objects # SCN_OBJECTS.selected = [] # de select all - importedObjects = [] # Fill this list with objects + importedObjects = [] # Fill this list with objects process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH) # fixme, make unglobal @@ -887,36 +862,36 @@ def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, for ob in importedObjects: if ob.type == 'MESH': # if ob.type=='Mesh': - ob.makeDisplayList() # Why dosnt this update the bounds? + ob.makeDisplayList() # Why doesn't this update the bounds? for v in ob.getBoundBox(): - for i in (0,1,2): + for i in (0, 1, 2): if v[i] < BOUNDS_3DS[i]: - BOUNDS_3DS[i]= v[i] # min + BOUNDS_3DS[i] = v[i] # min if v[i] > BOUNDS_3DS[i + 3]: - BOUNDS_3DS[i + 3]= v[i] # min + BOUNDS_3DS[i + 3] = v[i] # max # Get the max axis x/y/z - max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2]) + max_axis = max(BOUNDS_3DS[3] - BOUNDS_3DS[0], BOUNDS_3DS[4] - BOUNDS_3DS[1], BOUNDS_3DS[5] - BOUNDS_3DS[2]) # print max_axis - if max_axis < 1 << 30: # Should never be false but just make sure. + if max_axis < 1 << 30: # Should never be false but just make sure. # Get a new scale factor if set as an option SCALE = 1.0 while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS: - SCALE/=10 + SCALE /= 10.0 # SCALE Matrix SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4) for ob in importedObjects: if ob.parent is None: - ob.matrix_world = ob.matrix_world * SCALE_MAT + ob.matrix_world = ob.matrix_world * SCALE_MAT # Done constraining to bounds. # Select all new objects. - print(" done in %.4f sec." % (time.clock()-time1)) + print(" done in %.4f sec." % (time.clock() - time1)) file.close()