"""
**Project Name:**      MakeHuman
**Code Home Page:**    http://code.google.com/p/makehuman/
**Authors:**           Thomas Larsson
**Copyright(c):**      MakeHuman Team 2001-2010
**Licensing:**         GPL3 (see also http://sites.google.com/site/makehumandocs/licensing)
**Coding Standards:**  See http://sites.google.com/site/makehumandocs/developers-guide

Abstract

MHX (MakeHuman eXchange format) importer for Blender 2.5x.
"""
# Blender 2.5x add-on registration metadata (the pre-2.6 "bl_addon_info" form).
bl_addon_info = {
    "name": "Import: MakeHuman (.mhx)",
    "author": "Thomas Larsson",
    "version": "1.0",
    "blender": (2, 5, 4),
    "api": 31913,
    "location": "File > Import",
    "description": "Import files in the MakeHuman eXchange format (.mhx)",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/File_I-O/Make_Human",
    "tracker_url": "https://projects.blender.org/tracker/index.php?func=detail&aid=21872&group_id=153&atid=469",
    "category": "Import/Export",
}
"""
Place this file in the .blender/scripts/addons dir
You have to activated the script in the "Add-Ons" tab (user preferences).
Access from the File > Import menu.
"""
# MHX file-format version this importer understands (checked in checkMhxVersion).
MAJOR_VERSION = 1
MINOR_VERSION = 0
# Minimum Blender version required (checked in checkBlenderVersion).
BLENDER_VERSION = (2, 54, 0)
#
#
#
import bpy
import os
import time
import mathutils
from mathutils import *
import string
# Compatibility-mode flags: MHX249 is switched on while parsing a file that
# declares Blender 2.49 mode (see parse()).
MHX249 = False
Blender24 = False
Blender25 = True
# Fallback directory searched for texture images (see loadImage).
TexDir = "~/makehuman/exports"
#
#
#
theScale = 1.0          # global scale factor applied to imported data
One = 1.0/theScale      # inverse scale, kept in sync with theScale
useMesh = 1
verbosity = 2           # higher value prints more progress information
warnedTextureDir = False  # one-shot warning guards
warnedVersion = False
true = True             # lowercase aliases, presumably so MHX token text can be eval'd -- confirm
false = False
Epsilon = 1e-6
nErrors = 0             # error counter reported at the end of readMhxFile
theTempDatum = None
todo = []               # deferred (expr, globals, locals) fixups exec'd after parsing
#
# toggle flags
# Toggle flags: a bitmask selecting which parts of the MHX file are imported.
T_EnforceVersion = 0x01
T_Clothes = 0x02
T_Stretch = 0x04
T_Bend = 0x08
T_Diamond = 0x10
# The next three constants are referenced in 'toggle' below but their
# definitions were lost in extraction; restored from the gap in the bit
# sequence (0x10 jumped straight to 0x100).
T_Replace = 0x20
T_Face = 0x40
T_Shape = 0x80
T_Mesh = 0x100
T_Armature = 0x200
T_Proxy = 0x400
T_Cage = 0x800
T_Rigify = 0x1000
T_Preset = 0x2000
T_Symm = 0x4000
T_MHX = 0x8000
# Default import configuration.
toggle = T_EnforceVersion + T_Replace + T_Mesh + T_Armature + T_Face + T_Shape + T_Proxy + T_Clothes
# setFlagsAndFloats(rigFlags):
# Global floats
#fFingerPanel = 0.0
#fFingerIK = 0.0
fNoStretch = 0.0
# rigLeg and rigArm flags
#T_GoboFoot = 0x0002
#T_InvFoot = 0x0010
#T_InvFootPT = 0x0020
#T_InvFootNoPT = 0x0040
#T_FingerPanel = 0x100
#T_FingerRot = 0x0200
#T_FingerIK = 0x0400
#T_LocalFKIK = 0x8000
#rigLeg = 0
#rigArm = 0
def setFlagsAndFloats():
    """Derive global float flags from the current 'toggle' bitmask.

    Bug fix: the stretch branch used '==' (a no-op comparison) instead of
    '=', so fNoStretch kept a stale 1.0 after an earlier import had run
    with stretch disabled.
    """
    '''
    global toggle, rigLeg, rigArm
    (footRig, fingerRig) = rigFlags
    rigLeg = 0
    if footRig == 'Reverse foot':
        rigLeg |= T_InvFoot
        if toggle & T_PoleTar:
            rigLeg |= T_InvFootPT
        else:
            rigLeg |= T_InvFootNoPT
    elif footRig == 'Gobo': rigLeg |= T_GoboFoot
    rigArm = 0
    if fingerRig == 'Panel': rigArm |= T_FingerPanel
    elif fingerRig == 'Rotation': rigArm |= T_FingerRot
    elif fingerRig == 'IK': rigArm |= T_FingerIK
    toggle |= T_Panel
    '''
    global fNoStretch
    # fNoStretch is 1.0 when stretching is disabled, 0.0 when enabled.
    if toggle & T_Stretch:
        fNoStretch = 0.0
    else:
        fNoStretch = 1.0
# Dictionaries
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
# Registry of every datablock created by the importer, keyed first by type
# then by name, so later references in the MHX file can find earlier data.
# (The 'loadedData = {' header and closing brace were lost in extraction;
# the name is grounded by uses such as loadedData['Object'][name] below.)
loadedData = {
    'NONE' : {},
    'Object' : {},
    'Mesh' : {},
    'Armature' : {},
    'Lamp' : {},
    'Camera' : {},
    'Lattice' : {},
    'Curve' : {},
    'Text' : {},
    'Material' : {},
    'Image' : {},
    'MaterialTextureSlot' : {},
    'Texture' : {},
    'Bone' : {},
    'BoneGroup' : {},
    'Rigify' : {},
    'Action' : {},
    'Group' : {},
    'MeshTextureFaceLayer' : {},
    'MeshColorLayer' : {},
    'VertexGroup' : {},
    'ShapeKey' : {},
    'ParticleSystem' : {},
    'ObjectConstraints' : {},
    'ObjectModifiers' : {},
    'MaterialSlot' : {},
}

# Maps an MHX type name to the bpy.data collection holding that type;
# used by parseDefaultType to build "bpy.data.<plural>.new(...)".
Plural = {
    'Object' : 'objects',
    'Mesh' : 'meshes',
    'Lattice' : 'lattices',
    'Curve' : 'curves',
    'Text' : 'texts',
    'Group' : 'groups',
    'Empty' : 'empties',
    'Armature' : 'armatures',
    'Bone' : 'bones',
    'BoneGroup' : 'bone_groups',
    'Pose' : 'poses',
    'PoseBone' : 'pose_bones',
    'Material' : 'materials',
    'Texture' : 'textures',
    'Image' : 'images',
    'Camera' : 'cameras',
    'Lamp' : 'lamps',
    'World' : 'worlds',
}
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
# checkBlenderVersion()
#
def checkBlenderVersion():
    """Raise NameError unless the running Blender is at least BLENDER_VERSION.

    Bug fix: the original compared each component independently
    (``if a <= A: return`` / ``if b <= B: return`` / ...), which wrongly
    accepted e.g. (2, 53, 0) when (2, 54, 0) is required, because the minor
    check could succeed on its own.  A lexicographic tuple comparison is
    the correct test.
    """
    print("Found Blender", bpy.app.version)
    if tuple(bpy.app.version) >= BLENDER_VERSION:
        return
    msg = (
"This version of the MHX importer only works with Blender (%d, %d, %d) or later. " % BLENDER_VERSION +
"Download a more recent Blender from www.blender.org or www.graphicall.org.\n"
)
    raise NameError(msg)
#
# readMhxFile(filePath, scale):
#
def readMhxFile(filePath, scale):
    """Read and import the MHX file at filePath, scaled by scale.

    Tokenizes the whole file into nested [key, values, sub_tokens] lists,
    parses them, then exec's the deferred fixups collected in 'todo'.
    """
    global todo, nErrors, theScale, defaultScale, One, toggle
    checkBlenderVersion()
    theScale = scale
    defaultScale = scale
    One = 1.0/theScale
    fileName = os.path.expanduser(filePath)
    (shortName, ext) = os.path.splitext(fileName)
    if ext.lower() != ".mhx":
        print("Error: Not a mhx file: " + fileName)
        return
    print( "Opening MHX file "+ fileName )
    time1 = time.clock()
    ignore = False
    stack = []          # saved token lists, one per open block
    tokens = []
    key = "toplevel"
    level = 0
    nErrors = 0
    # #if/#else/#endif handling: lines are skipped while comment < nesting.
    comment = 0
    nesting = 0
    setFlagsAndFloats()
    file= open(fileName, "rU")
    print( "Tokenizing" )
    lineNo = 0
    for line in file:
        # print(line)
        lineSplit= line.split()
        lineNo += 1
        if len(lineSplit) == 0:
            pass
        elif lineSplit[0][0] == '#':
            # Preprocessor-style conditionals; the condition is eval'd as Python.
            if lineSplit[0] == '#if':
                if comment == nesting:
                    try:
                        res = eval(lineSplit[1])
                    except:
                        res = False
                    if res:
                        comment += 1
                nesting += 1
            elif lineSplit[0] == '#else':
                if comment == nesting-1:
                    comment += 1
                elif comment == nesting:
                    comment -= 1
            elif lineSplit[0] == '#endif':
                if comment == nesting:
                    comment -= 1
                nesting -= 1
        elif comment < nesting:
            pass        # inside a false #if branch
        elif lineSplit[0] == 'end':
            # Close the current block: attach its tokens to the parent token.
            try:
                sub = tokens
                tokens = stack.pop()
                if tokens:
                    tokens[-1][2] = sub
                level -= 1
            except:
                print( "Tokenizer error at or before line %d" % lineNo )
                print( line )
                dummy = stack.pop()
        elif lineSplit[-1] == ';':
            # One-line statement "key values ;" ('\' escapes the key word).
            if lineSplit[0] == '\\':
                key = lineSplit[1]
                tokens.append([key,lineSplit[2:-1],[]])
            else:
                key = lineSplit[0]
                tokens.append([key,lineSplit[1:-1],[]])
        else:
            # Block opener "key values": push and collect until matching 'end'.
            key = lineSplit[0]
            tokens.append([key,lineSplit[1:],[]])
            stack.append(tokens)
            level += 1
            tokens = []
    file.close()
    if level != 0:
        raise NameError("Tokenizer out of kilter %d" % level)
    clearScene()
    print( "Parsing" )
    parse(tokens)
    # Run deferred expressions that could not be evaluated during parsing.
    for (expr, glbals, lcals) in todo:
        try:
            print("Doing %s" % expr)
            exec(expr, glbals, lcals)
        except:
            msg = "Failed: "+expr
            print( msg )
            nErrors += 1
            #raise NameError(msg)
    print("Postprocess")
    postProcess()
    print("HideLayers")
    hideLayers()
    time2 = time.clock()
    print("toggle = %x" % toggle)
    msg = "File %s loaded in %g s" % (fileName, time2-time1)
    if nErrors:
        msg += " but there where %d errors. " % (nErrors)
    print(msg)
    return
#
# getObject(name, var, glbals, lcals):
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
def getObject(name, var, glbals, lcals):
    """Look up a previously imported object by name; if it is not loaded
    yet, defer the lookup to the todo list and return None.

    The 'def' header was lost in extraction; name and signature are taken
    from the comment "getObject(name, var, glbals, lcals):" above.
    """
    try:
        ob = loadedData['Object'][name]
    except:
        if name != "None":
            # NOTE(review): '%' applied to a dict with no format specifiers
            # is a no-op; presumably 'var'/'name' were meant to be
            # interpolated into the deferred expression -- confirm.
            pushOnTodoList(None, "ob = loadedData['Object'][name]" % globals(), locals())
        ob = None
    return ob
#
# checkMhxVersion(major, minor):
#
def checkMhxVersion(major, minor):
    """Compare the file's MHX version against this importer's.

    On mismatch: raise NameError if T_EnforceVersion is set in 'toggle',
    otherwise print the message once (warnedVersion suppresses repeats).
    """
    global warnedVersion
    if major != MAJOR_VERSION or minor != MINOR_VERSION:
        if warnedVersion:
            return
        else:
            msg = (
"Wrong MHX version\n" +
"Expected MHX %d.%d but the loaded file has version MHX %d.%d\n" % (MAJOR_VERSION, MINOR_VERSION, major, minor) +
"You can disable this error message by deselecting the Enforce version option when importing. " +
"Alternatively, you can try to download the most recent nightly build from www.makehuman.org. " +
"The current version of the import script is located in the importers/mhx/blender25x folder and is called import_scene_mhx.py. " +
"The version distributed with Blender builds from www.graphicall.org may be out of date.\n"
)
            if toggle & T_EnforceVersion:
                raise NameError(msg)
            else:
                print(msg)
                warnedVersion = True
    return
#
# parse(tokens):
#
# Result of the last #if evaluation (shared with the token handlers).
ifResult = False

def parse(tokens):
    """Top-level parser: dispatch each (key, val, sub) token to its handler.

    The extraction had injected the original file's line-number gutter
    (bare integers) between the 'def' line and the body; that junk is
    removed here and the body restored unchanged.
    """
    global MHX249, ifResult, theScale, defaultScale, One
    for (key, val, sub) in tokens:
        # print("Parse %s" % key)
        data = None
        if key == 'MHX':
            checkMhxVersion(int(val[0]), int(val[1]))
        elif key == 'MHX249':
            MHX249 = eval(val[0])
            print("Blender 2.49 compatibility mode is %s\n" % MHX249)
        elif MHX249:
            # Everything else is ignored in 2.49 compatibility mode.
            pass
        elif key == 'print':
            msg = concatList(val)
            print(msg)
        elif key == 'warn':
            msg = concatList(val)
            print(msg)
        elif key == 'error':
            msg = concatList(val)
            raise NameError(msg)
        elif key == 'NoScale':
            if eval(val[0]):
                theScale = 1.0
            else:
                theScale = defaultScale
            One = 1.0/theScale
        elif key == "Object":
            parseObject(val, sub)
        elif key == "Mesh":
            data = parseMesh(val, sub)
        elif key == "Armature":
            data = parseArmature(val, sub)
        elif key == "Pose":
            data = parsePose(val, sub)
        elif key == "Action":
            data = parseAction(val, sub)
        elif key == "Material":
            data = parseMaterial(val, sub)
        elif key == "Texture":
            data = parseTexture(val, sub)
        elif key == "Image":
            data = parseImage(val, sub)
        elif key == "Curve":
            data = parseCurve(val, sub)
        elif key == "TextCurve":
            data = parseTextCurve(val, sub)
        elif key == "Lattice":
            data = parseLattice(val, sub)
        elif key == "Group":
            data = parseGroup(val, sub)
        elif key == "Lamp":
            data = parseLamp(val, sub)
        elif key == "World":
            data = parseWorld(val, sub)
        elif key == "Scene":
            data = parseScene(val, sub)
        elif key == "Process":
            parseProcess(val, sub)
        elif key == 'AnimationData':
            try:
                ob = loadedData['Object'][val[0]]
            except:
                ob = None
            if ob:
                bpy.context.scene.objects.active = ob
                parseAnimationData(ob, val, sub)
        elif key == 'ShapeKeys':
            try:
                ob = loadedData['Object'][val[0]]
            except:
                ob = None
            if ob:
                bpy.context.scene.objects.active = ob
                parseShapeKeys(ob, ob.data, val, sub)
        else:
            data = parseDefaultType(key, val, sub)
        if data and key != 'Mesh':
            print( data )
    return
#
# parseDefaultType(typ, args, tokens):
def parseDefaultType(typ, args, tokens):
    """Generic handler for block types without a dedicated parser: create a
    new datablock via bpy.data.<plural>.new(name) and apply the sub-tokens
    with defaultKey.

    The 'def' header was lost in extraction; name and signature are taken
    from the comment "parseDefaultType(typ, args, tokens):" above.
    """
    global todo
    name = args[0]
    data = None
    expr = "bpy.data.%s.new('%s')" % (Plural[typ], name)
    print(expr)
    data = eval(expr)
    print(" ok", data)
    bpyType = typ.capitalize()
    print(bpyType, name, data)
    loadedData[bpyType][name] = data
    if data == None:
        return None
    for (key, val, sub) in tokens:
        #print("%s %s" % (key, val))
        defaultKey(key, val, sub, 'data', [], globals(), locals())
    print("Done ", data)
    return data
# concatList(elts)
def concatList(elts):
    """Return the elements of elts joined into one string, each preceded by
    a single space (so the result has a leading space, matching the
    original accumulation loop).

    The 'def' header was lost in extraction; signature from the comment
    "concatList(elts)" above.  Rewritten with str.join instead of repeated
    '+=' (which also shadowed the imported 'string' module locally).
    """
    return "".join(" %s" % elt for elt in elts)
# parseAction(args, tokens):
# parseFCurve(fcu, args, tokens):
# parseKeyFramePoint(pt, args, tokens):
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
def parseAction(args, tokens):
    """Parse an Action block: first lay down keyframes with
    prepareActionFCurve, then edit the resulting F-curves.

    The 'def' header was lost in extraction; name and signature from the
    comment "parseAction(args, tokens):" above and the call in parse().
    """
    name = args[0]
    if invalid(args[1]):
        return
    ob = bpy.context.object
    bpy.ops.object.mode_set(mode='POSE')
    if ob.animation_data:
        ob.animation_data.action = None
    created = {}
    # First pass: insert keyframes so Blender creates the F-curves.
    for (key, val, sub) in tokens:
        if key == 'FCurve':
            prepareActionFCurve(ob, created, val, sub)
    act = ob.animation_data.action
    loadedData['Action'][name] = act
    if act == None:
        print("Ignoring action %s" % name)
        return act
    act.name = name
    print("Action", name, act, ob)
    # Second pass: fill in the keyframe point values.
    for (key, val, sub) in tokens:
        if key == 'FCurve':
            fcu = parseActionFCurve(act, ob, val, sub)
        else:
            defaultKey(key, val, sub, 'act', [], globals(), locals())
    ob.animation_data.action = None
    bpy.ops.object.mode_set(mode='OBJECT')
    return act
def prepareActionFCurve(ob, created, args, tokens):
    """Insert a keyframe at every 'kp' time of this F-curve so that Blender
    creates the curve; 'created' maps data-path expressions to the channels
    already handled, preventing duplicate insertion.
    """
    dataPath = args[0]
    index = args[1]
    (expr, channel) = channelFromDataPath(dataPath, index)
    # Skip if this (expr, channel) pair was already prepared.
    try:
        if channel in created[expr]:
            return
        else:
            created[expr].append(channel)
    except:
        created[expr] = [channel]
    times = []
    for (key, val, sub) in tokens:
        if key == 'kp':
            times.append(int(val[0]))
    try:
        data = eval(expr)
    except:
        print("Ignoring illegal expression: %s" % expr)
        return
    n = 0
    for t in times:
        #bpy.context.scene.current_frame = t
        bpy.ops.anim.change_frame(frame = t)
        try:
            data.keyframe_insert(channel)
            n += 1
        except:
            pass
            #print("failed", data, expr, channel)
    if n != len(times):
        print("Mismatch", n, len(times), expr, channel)
    return
def channelFromDataPath(dataPath, index):
    """Split an RNA data path into (expr, channel), where expr is a Python
    expression rooted at a variable named 'ob' and channel is the animated
    property name.

    The 'def' header was lost in extraction; name and signature are
    grounded by the calls in prepareActionFCurve and parseActionFCurve.
    Note: a path with more than two ']' separators leaves expr/channel
    unbound (NameError) -- behavior kept as-is.
    """
    words = dataPath.split(']')
    if len(words) == 1:
        # e.g. "location"
        expr = "ob"
        channel = dataPath
    elif len(words) == 2:
        # e.g. pose.bones["tongue"].location
        expr = "ob.%s]" % (words[0])
        cwords = words[1].split('.')
        channel = cwords[1]
    elif len(words) == 3:
        # e.g. pose.bones["brow.R"]["mad"]
        expr = "ob.%s]" % (words[0])
        cwords = words[1].split('"')
        channel = cwords[1]
    # print(expr, channel, index)
    return (expr, channel)
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
def parseActionFCurve(act, ob, args, tokens):
    """Find the F-curve of 'act' matching this token's data path and array
    index, set its keyframe points from the 'kp' sub-tokens, and return it.

    The 'def' header was lost in extraction; name and signature from the
    call "parseActionFCurve(act, ob, val, sub)" in parseAction.
    """
    dataPath = args[0]
    index = args[1]
    (expr, channel) = channelFromDataPath(dataPath, index)
    index = int(args[1])
    success = False
    for fcu in act.fcurves:
        (expr1, channel1) = channelFromDataPath(fcu.data_path, fcu.array_index)
        if expr1 == expr and channel1 == channel and fcu.array_index == index:
            success = True
            break
    if not success:
        return None
    n = 0
    for (key, val, sub) in tokens:
        if key == 'kp':
            try:
                pt = fcu.keyframe_points[n]
                pt.interpolation = 'LINEAR'
                pt = parseKeyFramePoint(pt, val, sub)
                n += 1
            except:
                pass
                #print(tokens)
                #raise NameError("kp", fcu, n, len(fcu.keyframe_points), val)
        else:
            defaultKey(key, val, sub, 'fcu', [], globals(), locals())
    return fcu
def parseKeyFramePoint(pt, args, tokens):
    """Set a keyframe point's coordinate and, when present, its two handles.

    The 'def' header was lost in extraction; signature from the comment
    "parseKeyFramePoint(pt, args, tokens):" above.  Bug fix: handle2 read
    args[3] twice (copy-paste off-by-one); it must use args[4] and args[5],
    continuing the args[2]/args[3] pair consumed by handle1.
    """
    pt.co = (float(args[0]), float(args[1]))
    if len(args) > 2:
        pt.handle1 = (float(args[2]), float(args[3]))
        pt.handle2 = (float(args[4]), float(args[5]))
    return pt
#
# parseAnimationData(rna, args, tokens):
# parseDriver(drv, args, tokens):
# parseDriverVariable(var, args, tokens):
#
def parseAnimationData(rna, args, tokens):
    """Parse an AnimationData block for datablock rna.

    args[1] is an on/off flag evaluated with eval(); animation_data is
    created on rna if missing.  FCurve sub-tokens become driver F-curves.
    """
    if not eval(args[1]):
        return
    if rna.animation_data == None:
        rna.animation_data_create()
    adata = rna.animation_data
    for (key, val, sub) in tokens:
        if key == 'FCurve':
            fcu = parseAnimDataFCurve(adata, rna, val, sub)
        else:
            defaultKey(key, val, sub, 'adata', [], globals(), locals())
    return adata
def parseAnimDataFCurve(adata, rna, args, tokens):
    """Parse one driven F-curve: args are (data path, array index, validity
    flag).  'Driver' sub-tokens create the curve; 'FModifier' sub-tokens
    configure its modifier.

    NOTE(review): if an FModifier token arrives before any Driver token,
    'fcu' is unbound and this raises NameError -- confirm token order is
    guaranteed by the exporter.
    """
    if invalid(args[2]):
        return
    dataPath = args[0]
    index = int(args[1])
    # print("parseAnimDataFCurve", adata, dataPath, index)
    for (key, val, sub) in tokens:
        if key == 'Driver':
            fcu = parseDriver(adata, dataPath, index, rna, val, sub)
        elif key == 'FModifier':
            parseFModifier(fcu, val, sub)
        else:
            defaultKey(key, val, sub, 'fcu', [], globals(), locals())
    return fcu
"""
    fcurve = con.driver_add("influence", 0)
    driver = fcurve.driver
    driver.type = 'AVERAGE'
"""
def parseDriver(adata, dataPath, index, rna, args, tokens):
    """Create a driver F-curve on rna for dataPath/index and fill in its
    driver type and variables.

    Paths ending in ']' address custom bone properties; that branch only
    evaluates the bone expression and returns None (drivers on custom
    properties are not created).  The extraction had injected line-number
    junk between the 'def' line and the body; it is removed here and the
    body restored unchanged.
    """
    if dataPath[-1] == ']':
        words = dataPath.split(']')
        expr = "rna." + words[0] + ']'
        pwords = words[1].split('"')
        prop = pwords[1]
        # print("prop", expr, prop)
        bone = eval(expr)
        return None
    else:
        words = dataPath.split('.')
        channel = words[-1]
        expr = "rna"
        for n in range(len(words)-1):
            expr += "." + words[n]
        expr += ".driver_add('%s', index)" % channel
        # print("expr", rna, expr)
        fcu = eval(expr)
    drv = fcu.driver
    drv.type = args[0]
    for (key, val, sub) in tokens:
        if key == 'DriverVariable':
            var = parseDriverVariable(drv, rna, val, sub)
        else:
            defaultKey(key, val, sub, 'drv', [], globals(), locals())
    return fcu
def parseDriverVariable(drv, rna, args, tokens):
    """Create one driver variable on drv (name = args[0], type = args[1])
    and fill its targets, in order, from the 'Target' sub-tokens."""
    var = drv.variables.new()
    var.name = args[0]
    var.type = args[1]
    nTarget = 0
    # print("var", var, var.name, var.type)
    for (key, val, sub) in tokens:
        if key == 'Target':
            parseDriverTarget(var, nTarget, rna, val, sub)
            nTarget += 1
        else:
            defaultKey(key, val, sub, 'var', [], globals(), locals())
    return var
def parseFModifier(fcu, args, tokens):
    """Apply key/value settings to the F-curve's first modifier.

    The 'def' header was lost in extraction; name and signature from the
    call "parseFModifier(fcu, val, sub)" in parseAnimDataFCurve.  The
    commented-out lines (kept verbatim) show an abandoned attempt to
    create a new modifier instead of reusing modifiers[0].
    """
    #fmod = fcu.modifiers.new()
    fmod = fcu.modifiers[0]
    #fmod.type = args[0]
    #print("fmod", fmod, fmod.type)
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, 'fmod', [], globals(), locals())
    return fmod
"""
    var = driver.variables.new()
    var.name = target_bone
    var.targets[0].id_type = 'OBJECT'
    var.targets[0].id = obj
    var.targets[0].rna_path = driver_path
"""
def parseDriverTarget(var, nTarget, rna, args, tokens):
    """Fill in target number nTarget of driver variable var; args[0] names
    a previously imported object used as the target's id."""
    targ = var.targets[nTarget]
    targ.id = loadedData['Object'][args[0]]
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, 'targ', [], globals(), locals())
    return targ
# parseMaterial(args, ext, tokens):
# parseMTex(mat, args, tokens):
# parseTexture(args, tokens):
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
def parseMaterial(args, tokens):
    """Create a material named args[0], register it in loadedData, and
    dispatch its sub-blocks (MTex, Ramp, SSS, Strand, NodeTree, ...).

    The 'def' header was lost in extraction; the (args, tokens) signature
    follows from the call "parseMaterial(val, sub)" in parse().
    """
    global todo
    name = args[0]
    mat = bpy.data.materials.new(name)
    if mat == None:
        return None
    loadedData['Material'][name] = mat
    for (key, val, sub) in tokens:
        if key == 'MTex':
            parseMTex(mat, val, sub)
        elif key == 'Ramp':
            parseRamp(mat, val, sub)
        elif key == 'RaytraceTransparency':
            parseDefault(mat.raytrace_transparency, sub, {}, [])
        elif key == 'Halo':
            parseDefault(mat.halo, sub, {}, [])
        elif key == 'SSS':
            parseDefault(mat.subsurface_scattering, sub, {}, [])
        elif key == 'Strand':
            parseDefault(mat.strand, sub, {}, [])
        elif key == 'NodeTree':
            mat.use_nodes = True
            parseNodeTree(mat.node_tree, val, sub)
        else:
            # NOTE(review): 'exclude' is built but not passed to defaultKey -- confirm intent.
            exclude = ['specular_intensity', 'tangent_shading']
            defaultKey(key, val, sub, 'mat', [], globals(), locals())
    return mat
def parseMTex(mat, args, tokens):
    """Add a texture slot to mat: args are (slot index, texture name,
    texture coordinates, map-to target); the texture must already be in
    loadedData['Texture'].

    The 'def' header was lost in extraction; name and signature from the
    comment "parseMTex(mat, args, tokens):" above.
    """
    global todo
    index = int(args[0])
    texname = args[1]
    texco = args[2]
    mapto = args[3]
    tex = loadedData['Texture'][texname]
    mtex = mat.texture_slots.add()
    mtex.texture_coords = texco
    mtex.texture = tex
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "mtex", [], globals(), locals())
    return mtex
def parseTexture(args, tokens):
    """Create a texture named args[0] of type args[1], register it in
    loadedData, and attach its image / ramp / node tree.

    The 'def' header was lost in extraction; name and signature from the
    comment "parseTexture(args, tokens):" above.
    """
    global todo
    if verbosity > 2:
        print( "Parsing texture %s" % args )
    name = args[0]
    tex = bpy.data.textures.new(name=name, type=args[1])
    loadedData['Texture'][name] = tex
    for (key, val, sub) in tokens:
        if key == 'Image':
            try:
                imgName = val[0]
                img = loadedData['Image'][imgName]
                tex.image = img
            except:
                # Best-effort: a missing image is tolerated silently.
                msg = "Unable to load image '%s'" % val[0]
        elif key == 'Ramp':
            parseRamp(tex, val, sub)
        elif key == 'NodeTree':
            tex.use_nodes = True
            parseNodeTree(tex.node_tree, val, sub)
        else:
            defaultKey(key, val, sub, "tex", ['use_nodes', 'use_textures', 'contrast'], globals(), locals())
    return tex
def parseRamp(data, args, tokens):
    """Enable and fill a color ramp on 'data' (a material or texture);
    args[0] names the ramp attribute (e.g. "color_ramp").

    The 'def' header was lost in extraction; the parameter must be named
    'data' because the exec/eval strings below reference it; call sites
    pass a material or texture.
    """
    nvar = "data.%s" % args[0]
    use = "data.use_%s = True" % args[0]
    exec(use)
    ramp = eval(nvar)
    elts = ramp.elements
    n = 0
    for (key, val, sub) in tokens:
        # print("Ramp", key, val)
        if key == 'Element':
            elts[n].color = eval(val[0])
            elts[n].position = eval(val[1])
            n += 1
        else:
            defaultKey(key, val, sub, "tex", ['use_nodes', 'use_textures', 'contrast'], globals(), locals())
def parseSSS(mat, args, tokens):
    """Apply key/value settings to mat's subsurface-scattering block.

    The 'def' header was lost in extraction; the parameter must be 'mat'
    (read on the first line) and the local must be 'sss' (referenced by
    name in the defaultKey call).
    """
    sss = mat.subsurface_scattering
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "sss", [], globals(), locals())
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
def parseStrand(mat, args, tokens):
    """Apply key/value settings to mat's strand-rendering block.

    The 'def' header was lost in extraction; the parameter must be 'mat'
    (read on the first line) and the local must be 'strand' (referenced by
    name in the defaultKey call).
    """
    strand = mat.strand
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "strand", [], globals(), locals())
#
# parseNodeTree(tree, args, tokens):
# parseNode(node, args, tokens):
# parseSocket(socket, args, tokens):
#
def parseNodeTree(tree, args, tokens):
    """Parse a NodeTree block.  Currently disabled: the bare 'return' below
    deliberately skips node-tree import; everything after it is dead code."""
    return
    print("Tree", tree, args)
    print(list(tree.nodes))
    tree.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Node':
            parseNodes(tree.nodes, val, sub)
        else:
            defaultKey(key, val, sub, "tree", [], globals(), locals())
def parseNodes(nodes, args, tokens):
    """Parse a list of nodes.  Dead code: parseNodeTree returns before ever
    calling this, and 'node' below is unbound (would raise NameError)."""
    print("Nodes", nodes, args)
    print(list(nodes))
    node.name = args[0]  # BUG(review): 'node' is never defined in this scope
    for (key, val, sub) in tokens:
        if key == 'Inputs':
            parseSocket(node.inputs, val, sub)
        elif key == 'Outputs':
            parseSocket(node.outputs, val, sub)
        else:
            defaultKey(key, val, sub, "node", [], globals(), locals())
def parseNode(node, args, tokens):
    """Parse one node: rename it to args[0] and fill its input and output
    sockets from the sub-tokens.  Dead code while parseNodeTree is disabled."""
    print("Node", node, args)
    print(list(node.inputs), list(node.outputs))
    node.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Inputs':
            parseSocket(node.inputs, val, sub)
        elif key == 'Outputs':
            parseSocket(node.outputs, val, sub)
        else:
            defaultKey(key, val, sub, "node", [], globals(), locals())
def parseSocket(socket, args, tokens):
    """Parse one node socket: rename it to args[0].  Dead code while
    parseNodeTree is disabled; note 'tree' below is unbound."""
    print("Socket", socket, args)
    socket.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Node':
            parseNode(tree.nodes, val, sub)  # BUG(review): 'tree' undefined in this scope
        else:
            defaultKey(key, val, sub, "tree", [], globals(), locals())
#
# doLoadImage(filepath):
# loadImage(filepath):
# parseImage(args, tokens):
#
def doLoadImage(filepath):
    """Try to load the image at filepath into bpy.data.images.

    Returns the loaded image, or None when the file does not exist or
    cannot be read as an image.
    """
    fullpath = os.path.realpath(os.path.expanduser(filepath))
    if not os.path.isfile(fullpath):
        print( "No file "+fullpath )
        return None
    print( "Found file "+fullpath )
    try:
        return bpy.data.images.load(fullpath)
    except:
        print( "Cannot read image" )
        return None
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
def loadImage(filepath):
    """Load the image for filepath, searching first the TexDir fallback
    directory and then the file's own directory; return None (warning once)
    if neither contains it.

    The 'def' header was lost in extraction; name and signature from the
    comment "loadImage(filepath):" above.  The tail after the final
    'return None' is unreachable legacy Blender 2.4x code (Draw.PupStrInput)
    kept verbatim.
    """
    global TexDir, warnedTextureDir, loadedData
    texDir = os.path.expanduser(TexDir)
    path1 = os.path.expanduser(filepath)
    file1 = os.path.realpath(path1)
    (path, filename) = os.path.split(file1)
    (name, ext) = os.path.splitext(filename)
    print( "Loading ", filepath, " = ", filename )
    # img = doLoadImage(texDir+"/"+name+".png")
    # if img:
    #     return img
    img = doLoadImage(texDir+"/"+filename)
    if img:
        return img
    # img = doLoadImage(path+"/"+name+".png")
    # if img:
    #     return img
    img = doLoadImage(path+"/"+filename)
    if img:
        return img
    if warnedTextureDir:
        return None
    warnedTextureDir = True
    return None
    # --- unreachable Blender 2.4x fallback below ---
    TexDir = Draw.PupStrInput("TexDir? ", path, 100)
    texDir = os.path.expanduser(TexDir)
    img = doLoadImage(texDir+"/"+name+".png")
    if img:
        return img
    img = doLoadImage(TexDir+"/"+filename)
    return img
def parseImage(args, tokens):
    """Parse an Image block: load the file named by the (possibly
    space-containing) 'Filename' token, rename the image to args[0], and
    register it in loadedData.

    The 'def' header was lost in extraction; name and signature from the
    comment "parseImage(args, tokens):" above.
    """
    global todo
    imgName = args[0]
    img = None
    for (key, val, sub) in tokens:
        if key == 'Filename':
            # Re-join a filename that the tokenizer split on spaces.
            filename = val[0]
            for n in range(1,len(val)):
                filename += " " + val[n]
            img = loadImage(filename)
            if img == None:
                return None
            img.name = imgName
        else:
            defaultKey(key, val, sub, "img", ['depth', 'dirty', 'has_data', 'size', 'type'], globals(), locals())
    print ("Image %s" % img )
    loadedData['Image'][imgName] = img
    return img
#
# parseObject(args, tokens):
# createObject(type, name, data, datName):
# setObjectAndData(args, typ):
#
if verbosity > 2:
print( "Parsing object %s" % args )
name = args[0]
typ = args[1]
datName = args[2]