# NOTE: removed stray "Newer"/"Older" page-navigation residue (not Python code)
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# NOTE: moved the winmgr properties to __init__ and scene
# search for context.scene.advanced_objects1
# Add-on registration metadata read by Blender's add-on system.
# (Reconstructed: the original dict was shattered by pasted line-number junk.)
bl_info = {
    "name": "Laplacian Lightning",
    "author": "teldredge",
    # version matches the last entry in the release log below -- TODO confirm
    "version": (0, 2, 7),
    "blender": (2, 78, 0),
    "location": "3D View > Toolshelf > Create > Laplacian Lightning",
    "description": "Lightning mesh generator using laplacian growth algorithm",
    "category": "Object"}
# BLENDER LAPLACIAN LIGHTNING
# teldredge
# www.funkboxing.com
# https://developer.blender.org/T27189
# using algorithm from
# FAST SIMULATION OF LAPLACIAN GROWTH (FSLG)
# http://gamma.cs.unc.edu/FRAC/
# and a few ideas from
# FAST ANIMATION OF LIGHTNING USING AN ADAPTIVE MESH (FALUAM)
# http://gamma.cs.unc.edu/FAST_LIGHTNING/
"""
----- RELEASE LOG/NOTES/PONTIFICATIONS -----
v0.1.0 - 04.11.11
basic generate functions and UI
object creation report (Custom Properties: FSLG_REPORT)
v0.2.0 - 04.15.11
started spelling laplacian right.
add curve function (not in UI) ...twisting problem
classify stroke by MAIN path, h-ORDER paths, TIP paths
jitter cells for mesh creation
add materials if present
v0.2.1 - 04.16.11
mesh classification speedup
v0.2.2 - 04.21.11
fxns to write/read array to file
restrict growth to insulator cells (object bounding box)
origin/ground defineable by object
gridunit more like 'resolution'
v0.2.3 - 04.24.11
cloud attractor object (terminates loop if hit)
secondary path orders (hOrder) disabled in UI (set to 1)
v0.2.4 - 04.26.11
fixed object selection in UI
will not run if required object not selected
moved to view 3d > toolbox
v0.2.5 - 05.08.11
testing for 2.57b
single mesh output (for build modifier)
speedups (dist fxn)
v0.2.6 - 06.20.11
scale/pos on 'write to cubes' works now
if origin obj is mesh, uses all verts as initial charges
semi-helpful tooltips
speedups, faster dedupe fxn, faster classification
use any shape mesh obj as insulator mesh
must have rot=0, scale=1, origin set to geometry
often fails to block bolt with curved/complex shapes
separate single and multi mesh creation
v0.2.7 - 01.05.13
fixed the issue that prevented enabling the add-on
fixed makeMeshCube fxn
disabled visualization for voxels
v0.x -
-prevent create_setup_objects from generating duplicates
-fix vis fxn to only buildCPGraph once for VM or VS
-improve list fxns (rid of ((x,y,z),w) and use (x,y,z,w)), use 'sets'
-create python cmodule for a few of most costly fxns
i have pretty much no idea how to do this yet
-cloud and insulator can be groups of MESH objs
-text output, possibly to save on interrupt, allow continue from text
-?hook modifiers from tips->sides->main, weight w/ vert groups
-user defined 'attractor' path
-fix add curve function
-animated arcs via. ionization path
-environment map boundary conditions - requires Eqn. 15 from FSLG.
-assign wattage at each segment for HDRI
-?default settings for -lightning, -teslacoil, -spark/arc
-fix hOrder functionality
-multiple 'MAIN' branches for non-lightning discharges
-n-symmetry option, create mirror images, snowflakes, etc...
"""
import bpy
import time
import random
from bpy.types import (
Operator,
Panel,
)
# from math import sqrt
from mathutils import Vector
import struct
import bisect
import os.path
# -- Globals --
notZero = 0.0000000001
# set to True to enable debug prints
DEBUG = False
# Utility Functions
# func - function name, text - message, var - variable to print
# it can have one variable to observe
def debug_prints(func="", text="Message", var=None):
    """Print a labeled debug message, and optionally one variable, when DEBUG is on."""
    global DEBUG
    if not DEBUG:
        return
    print("\n[{}]\nmessage: {}".format(func, text))
    if var:
        print("variable: ", var)
# pass variables just like for the regular prints
def debug_print_vars(*args, **kwargs):
    """Forward all arguments straight to print(), but only when DEBUG is on."""
    global DEBUG
    if not DEBUG:
        return
    print(*args, **kwargs)
def within(x, y, d):
    """Return True if y lies within the closed interval [x - d, x + d].

    (Reconstructed: the body was split by pasted line-number junk; a
    chained comparison replaces the explicit if/else returning True/False.)
    """
    return x - d <= y <= x + d
def dist(ax, ay, az, bx, by, bz):
    """Euclidean distance between points (ax, ay, az) and (bx, by, bz)."""
    delta = Vector((ax - bx, ay - by, az - bz))
    return delta.length
def splitList(aList, idx):
    """Return the idx-th component of every item in aList as a flat list."""
    # comprehension replaces the manual append loop
    return [item[idx] for item in aList]
def splitListCo(aList):
    """Strip items down to (x, y, z) coordinate 3-tuples (drops any charge w)."""
    # comprehension replaces the manual append loop
    return [(p[0], p[1], p[2]) for p in aList]
def getLowHigh(aList):
    """Return (min, max) of a non-empty list using the builtins
    instead of a manual scan."""
    return min(aList), max(aList)
def weightedRandomChoice(aList):
    """Pick an index of aList with probability proportional to its weight.

    Entries with weight <= 0.0 can never be chosen.  Raises IndexError if
    no entry has positive weight (same failure mode as before).
    """
    cumulative = []  # running totals of the positive weights
    indices = []     # original aList index for each cumulative entry
    total = 0
    for idx, weight in enumerate(aList):
        if weight > 0.0:
            total += weight
            cumulative.append(total)
            indices.append(idx)
    # FIX: the old code bisected a list of (weight, index) tuples against
    # (r, None); on an exact tie Python 3 compares None with an int and
    # raises TypeError.  Bisecting plain totals avoids that; bisect_left
    # keeps the boundary value mapped to a valid index.
    r = random.uniform(0, total)
    return indices[bisect.bisect_left(cumulative, r)]
def getStencil3D_26(x, y, z):
    """Return the 26 grid neighbors of (x, y, z) -- the 3x3x3 cube minus its center."""
    return [
        (nx, ny, nz)
        for nx in (x - 1, x, x + 1)
        for ny in (y - 1, y, y + 1)
        for nz in (z - 1, z, z + 1)
        if (nx, ny, nz) != (x, y, z)
    ]
def jitterCells(aList, jit):
    """Return (x, y, z) copies of each cell, each coordinate offset by
    uniform noise in [-jit/2, +jit/2]."""
    half = jit / 2
    return [
        (cell[0] + random.uniform(-half, half),
         cell[1] + random.uniform(-half, half),
         cell[2] + random.uniform(-half, half))
        for cell in aList
    ]
def deDupe(seq, idfun=None):
    """Order-preserving de-duplication of seq; idfun maps items to their
    identity key (defaults to the item itself).

    Technique from http://www.peterbe.com/plog/uniqifiers-benchmark
    """
    key = idfun if idfun is not None else (lambda v: v)
    seen = {}
    unique = []
    for item in seq:
        marker = key(item)
        if marker not in seen:
            seen[marker] = 1
            unique.append(item)
    return unique
# Visualization functions
def writeArrayToVoxel(arr, filename):
    """Write a point array to filename as a raw 64x64x64 8-bit voxel grid.

    Cells listed in arr are set to 255, everything else 0; coordinates are
    offset by +32 so the grid is centered on the origin.  File layout is
    x-fastest (z outer, y middle, x inner), one unsigned byte per cell.
    (Reconstructed: the function was split by pasted line-number junk.)
    """
    gridS = 64
    half = gridS // 2
    bitOn = 255
    aGrid = [[[0 for z in range(gridS)] for y in range(gridS)] for x in range(gridS)]
    for a in arr:
        try:
            # NOTE(review): negative coords below -32 wrap via Python's
            # negative indexing rather than raising -- pre-existing behavior
            aGrid[a[0] + half][a[1] + half][a[2] + half] = bitOn
        except IndexError:  # was a bare except; only out-of-range is expected
            debug_prints(func="writeArrayToVoxel", text="Particle beyond voxel domain")
    # with-block replaces open/flush/close and guarantees the file closes
    with open(filename, "wb") as file:
        for z in range(gridS):
            for y in range(gridS):
                for x in range(gridS):
                    file.write(struct.pack('B', aGrid[x][y][z]))
def writeArrayToFile(arr, filename):
    """Write arr to a text file, one 'x,y,z' line per point.

    FIX: the original ended with `file.close` (no parentheses), so the
    file handle was never actually closed; a with-block guarantees it.
    """
    with open(filename, "w") as file:
        for a in arr:
            file.write(str(a[0]) + ',' + str(a[1]) + ',' + str(a[2]) + '\n')
def readArrayFromFile(filename):
    """Read 'x,y,z' lines back into a list of int 3-tuples.

    FIX: the file handle was never closed (use with); `line[0:-1]` chopped
    the last digit when the final line had no trailing newline -- strip()
    handles both cases.
    """
    arr = []
    with open(filename, "r") as file:
        for line in file:
            pt = line.strip().split(',')
            arr.append((int(pt[0]), int(pt[1]), int(pt[2])))
    return arr
def makeMeshCube_OLD(msize):
    # Legacy cube-mesh builder: creates an 8-vert, 6-quad cube of edge
    # length msize centered on the origin and returns the bpy mesh.
    # NOTE(review): uses the pre-2.63 API (mesh.faces / vertices_raw),
    # which no longer exists in modern Blender; superseded by
    # makeMeshCube() below and apparently kept only for reference.
    msize = msize / 2  # half-edge, so verts sit at +/- msize
    mmesh = bpy.data.meshes.new('q')
    mmesh.vertices.add(8)
    # bottom face verts (z = -msize), counter-clockwise
    mmesh.vertices[0].co = [-msize, -msize, -msize]
    mmesh.vertices[1].co = [-msize, msize, -msize]
    mmesh.vertices[2].co = [msize, msize, -msize]
    mmesh.vertices[3].co = [msize, -msize, -msize]
    # top face verts (z = +msize)
    mmesh.vertices[4].co = [-msize, -msize, msize]
    mmesh.vertices[5].co = [-msize, msize, msize]
    mmesh.vertices[6].co = [msize, msize, msize]
    mmesh.vertices[7].co = [msize, -msize, msize]
    # six quads: bottom, four sides, top
    mmesh.faces.add(6)
    mmesh.faces[0].vertices_raw = [0, 1, 2, 3]
    mmesh.faces[1].vertices_raw = [0, 4, 5, 1]
    mmesh.faces[2].vertices_raw = [2, 1, 5, 6]
    mmesh.faces[3].vertices_raw = [3, 2, 6, 7]
    mmesh.faces[4].vertices_raw = [0, 3, 7, 4]
    mmesh.faces[5].vertices_raw = [5, 4, 7, 6]
    mmesh.update(calc_edges=True)
    return(mmesh)
def makeMeshCube(msize):
    """Return a new cube mesh of edge length msize, centered on the origin."""
    h = msize / 2
    # bottom ring (z = -h) then top ring (z = +h), both counter-clockwise
    verts = []
    for z in (-h, h):
        verts += [(-h, -h, z), (-h, h, z), (h, h, z), (h, -h, z)]
    faces = [
        (0, 1, 2, 3), (4, 5, 6, 7), (0, 4, 5, 1),
        (1, 5, 6, 2), (2, 6, 7, 3), (3, 7, 4, 0)
    ]
    mmesh = bpy.data.meshes.new("Cube")
    mmesh.from_pydata(verts, [], faces)
    mmesh.update(calc_edges=True)
    return mmesh
def writeArrayToCubes(arr, gridBU, orig, cBOOL=False, jBOOL=True):
    """Create one 'xCUBE' cube object per cell of arr.

    gridBU: cube size / grid spacing in Blender units; orig: world offset.
    cBOOL:  color each cube by its charge a[3] (mostly unused).
    jBOOL:  afterwards select all xCUBE objects so the user can join them.
    (Reconstructed: the tail of this function was split by pasted
    line-number junk; the jBOOL guard around the selection pass is
    presumed from the otherwise-unused parameter -- TODO confirm.)
    """
    for a in arr:
        x = a[0]
        y = a[1]
        z = a[2]
        me = makeMeshCube(gridBU)
        ob = bpy.data.objects.new('xCUBE', me)
        ob.location.x = (x * gridBU) + orig[0]
        ob.location.y = (y * gridBU) + orig[1]
        ob.location.z = (z * gridBU) + orig[2]
        if cBOOL:  # mostly unused
            # pos + blue, neg - red, zero: black
            col = (1.0, 1.0, 1.0, 1.0)
            if a[3] == 0:
                col = (0.0, 0.0, 0.0, 1.0)
            if a[3] < 0:
                col = (-a[3], 0.0, 0.0, 1.0)
            if a[3] > 0:
                col = (0.0, 0.0, a[3], 1.0)
            ob.color = col
        bpy.context.scene.objects.link(ob)
    bpy.context.scene.update()
    if jBOOL:
        # Selects all cubes w/ ?bpy.ops.object.join() b/c
        # Can't join all cubes to a single mesh right... argh...
        for q in bpy.context.scene.objects:
            q.select = False
            if q.name[0:5] == 'xCUBE':
                q.select = True
                bpy.context.scene.objects.active = q
def addVert(ob, pt, conni=-1):
    """Append vertex pt to ob's mesh; if conni >= 0, also add an edge
    connecting the new vertex to vertex index conni."""
    mesh = ob.data
    mesh.vertices.add(1)
    new_vi = len(mesh.vertices) - 1
    mesh.vertices[new_vi].co = [pt[0], pt[1], pt[2]]
    if conni > -1:
        mesh.edges.add(1)
        mesh.edges[len(mesh.edges) - 1].vertices = [conni, new_vi]
    mesh.update()
def addEdge(ob, va, vb):
    """Add a single edge between existing vertex indexes va and vb on ob's mesh."""
    mesh = ob.data
    mesh.edges.add(1)
    last = len(mesh.edges) - 1
    mesh.edges[last].vertices = [va, vb]
    mesh.update()
def newMesh(mname):
    """Create an empty mesh plus wrapping object named mname, link the
    object into the scene, and return the object."""
    data = bpy.data.meshes.new(mname)
    obj = bpy.data.objects.new(mname, data)
    bpy.context.scene.objects.link(obj)
    return obj
def writeArrayToMesh(mname, arr, gridBU, rpt=None):
    """Create a mesh object named mname containing arr as one connected
    poly-line (each vert edge-linked to the previous), scaled by gridBU.
    rpt, if given, is stored on the object as a generation report."""
    mob = newMesh(mname)
    mob.scale = (gridBU, gridBU, gridBU)
    if rpt:
        addReportProp(mob, rpt)
    addVert(mob, arr[0], -1)
    for i, pt in enumerate(arr[1:], start=1):
        addVert(mob, pt, i - 1)
    return mob
# out of order - some problem with it adding (0,0,0)
def writeArrayToCurves(cname, arr, gridBU, bd=.05, rpt=None):
    """Write arr as a beveled Bezier curve object named cname.

    bd: bevel depth; gridBU: object scale; rpt: optional report property.
    NOTE(review): flagged "out of order" upstream (a problem adding
    (0,0,0)); not wired into the UI.
    (Reconstructed: the middle of this function was split by pasted
    line-number junk.)
    """
    cur = bpy.data.curves.new('fslg_curve', 'CURVE')
    cur.use_fill_front = False
    cur.use_fill_back = False
    cur.bevel_depth = bd
    cur.bevel_resolution = 2
    cob = bpy.data.objects.new(cname, cur)
    cob.scale = (gridBU, gridBU, gridBU)
    if rpt:
        addReportProp(cob, rpt)
    bpy.context.scene.objects.link(cob)
    cur.splines.new('BEZIER')
    cspline = cur.splines[0]
    div = 1  # spacing for handles (2 - 1/2 way, 1 - next bezier)
    for a in range(len(arr)):
        cspline.bezier_points.add(1)
        bp = cspline.bezier_points[len(cspline.bezier_points) - 1]
        # left handle: first point anchors to itself
        if a - 1 < 0:
            hL = arr[a]
        else:
            hx = arr[a][0] - ((arr[a][0] - arr[a - 1][0]) / div)
            hy = arr[a][1] - ((arr[a][1] - arr[a - 1][1]) / div)
            hz = arr[a][2] - ((arr[a][2] - arr[a - 1][2]) / div)
            hL = (hx, hy, hz)
        # right handle: last point anchors to itself
        if a + 1 > len(arr) - 1:
            hR = arr[a]
        else:
            hx = arr[a][0] + ((arr[a + 1][0] - arr[a][0]) / div)
            hy = arr[a][1] + ((arr[a + 1][1] - arr[a][1]) / div)
            hz = arr[a][2] + ((arr[a + 1][2] - arr[a][2]) / div)
            hR = (hx, hy, hz)
        bp.co = arr[a]
        bp.handle_left = hL
        bp.handle_right = hR
def addArrayToMesh(mob, arr):
    """Append arr to mesh object mob as a connected poly-line: each new
    vert is edge-linked to whatever vert is currently last in the mesh.

    (Removed the unused local `vcounti` from the original.)
    """
    addVert(mob, arr[0], -1)
    mmesh = mob.data
    for pt in arr[1:]:
        # connect to the current last vertex, then append
        addVert(mob, pt, len(mmesh.vertices) - 1)
def addMaterial(ob, matname):
    """Assign the existing material named matname to ob
    (raises KeyError if no such material exists)."""
    ob.active_material = bpy.data.materials[matname]
def writeStokeToMesh(arr, jarr, MAINi, HORDERi, TIPSi, orig, gs, rpt=None):
    """Write a classified stroke as three meshes: la0MAIN (main channel),
    la1H* (one object per secondary order), la2TIPS (tip paths).

    jarr: jittered coords; MAINi/HORDERi/TIPSi: index lists from
    classifyStroke; orig: world location; gs: grid scale; rpt: report.
    FIXES: `llmain` was used without being initialized, and the materials
    `try` had lost its `except` clause (both debug lines were fused).
    """
    # main branch
    debug_prints(func="writeStokeToMesh", text='Writing main branch')
    llmain = []
    for x in MAINi:
        llmain.append(jarr[x])
    mob = writeArrayToMesh('la0MAIN', llmain, gs)
    mob.location = orig
    # horder branches, one mesh object per order
    for hOi in range(len(HORDERi)):
        debug_prints(func="writeStokeToMesh", text="Writing order", var=hOi)
        hO = HORDERi[hOi]
        hob = newMesh('la1H' + str(hOi))
        for y in hO:
            llHO = []
            for x in y:
                llHO.append(jarr[x])
            addArrayToMesh(hob, llHO)
        hob.scale = (gs, gs, gs)
        hob.location = orig
    # tips
    debug_prints(func="writeStokeToMesh", text="Writing tip paths")
    tob = newMesh('la2TIPS')
    for y in TIPSi:
        llt = []
        for x in y:
            llt.append(jarr[x])
        addArrayToMesh(tob, llt)
    tob.scale = (gs, gs, gs)
    tob.location = orig
    # add materials to objects (if they exist)
    try:
        addMaterial(mob, 'edgeMAT-h0')
        addMaterial(hob, 'edgeMAT-h1')
        addMaterial(tob, 'edgeMAT-h2')
        debug_prints(func="writeStokeToMesh", text="Added materials")
    # KeyError: material missing; NameError: hob unbound when HORDERi is empty
    except (KeyError, NameError):
        debug_prints(func="writeStokeToMesh", text="Materials not found")
    # add generation report to all meshes
    if rpt:
        addReportProp(mob, rpt)
        addReportProp(hob, rpt)
        addReportProp(tob, rpt)
def writeStokeToSingleMesh(arr, jarr, orig, gs, mct, rpt=None):
    """Write the whole stroke as a single mesh object 'laALL': every
    jittered cell becomes a vert, every child/parent pair an edge.
    mct is the origin charge count passed to buildCPGraph."""
    graph = buildCPGraph(arr, mct)
    all_ob = newMesh('laALL')
    for pt in jarr:
        addVert(all_ob, pt)
    for child, parent in graph:
        addEdge(all_ob, parent, child)
    all_ob.location = orig
    all_ob.scale = ((gs, gs, gs))
    if rpt:
        addReportProp(all_ob, rpt)
def visualizeArray(cg, oob, gs, vm, vs, vc, vv, rst):
    """Dispatch visualization of the grown cell grid.

    cg: cellgrid; oob: origin object; gs: gridscale;
    vm/vs/vc/vv: multi-mesh / single-mesh / cubes / voxel toggles;
    rst: report string stored on the created objects.
    FIX: the voxel path was built with a hard-coded "\\\\" separator,
    which breaks on non-Windows systems -- use os.path.join instead.
    """
    winmgr = bpy.context.scene.advanced_objects1
    origin = oob.location
    # deal with vert multi-origins: a mesh origin contributes one initial
    # charge per vertex, otherwise assume the 2 synthetic start charges
    oct = 2
    if oob.type == 'MESH':
        oct = len(oob.data.vertices)
    if vm or vs:
        cjarr = jitterCells(cg, 1)
    if vm:  # write array to multi mesh
        aMi, aHi, aTi = classifyStroke(cg, oct, winmgr.HORDER)
        debug_prints(func="visualizeArray", text="Writing to multi-mesh")
        writeStokeToMesh(cg, cjarr, aMi, aHi, aTi, origin, gs, rst)
        debug_prints(func="visualizeArray", text="Multi-mesh written")
    if vs:  # write to single mesh
        debug_prints(func="visualizeArray", text="Writing to single mesh")
        writeStokeToSingleMesh(cg, cjarr, origin, gs, oct, rst)
        debug_prints(func="visualizeArray", text="Single mesh written")
    if vc:  # write array to cube objects
        debug_prints(func="visualizeArray", text="Writing to cubes")
        writeArrayToCubes(cg, gs, origin)
        debug_prints(func="visualizeArray", text="Cubes written")
    if vv:  # write array to voxel data file
        debug_prints(func="visualizeArray", text="Writing to voxels")
        fname = "FSLGvoxels.raw"
        vpath = os.path.join(os.path.dirname(bpy.data.filepath), fname)
        writeArrayToVoxel(cg, vpath)
        debug_prints(func="visualizeArray",
                     text="Voxel data written to:", var=vpath)
# read/write array to file (might not be necessary)
# tfile = 'c:\\testarr.txt'
# writeArrayToFile(cg, tfile)
# cg = readArrayFromFile(tfile)
# read/write array to curves (out of order)
# writeArrayToCurves('laMAIN', llmain, .10, .25)
# Algorithm functions
# from faluam paper
# plus some stuff i made up
def buildCPGraph(arr, sti=2):
    """Build the child/parent graph of a grown cell array.

    arr: xyz cell list as produced by the generator.
    sti: start index -- 2 for an empty origin, len(me.vertices) for a mesh.
    Returns a list of (child_index, parent_index) pairs into arr.
    """
    pairs = [(1, 0)]
    for ci in range(sti, len(arr)):
        cell = arr[ci]
        earlier = arr[0:ci]
        # every earlier cell touching this one (26-neighborhood) is a parent
        for neighbor in getStencil3D_26(cell[0], cell[1], cell[2]):
            if neighbor in earlier:
                pairs.append((ci, earlier.index(neighbor)))
    return pairs
def buildCPGraph_WORKINPROGRESS(arr, sti=2):
    # Experimental faster variant of buildCPGraph: instead of scanning
    # arr[0:ai] every iteration it tries to narrow the search window to
    # arr[ctix:ai].  NOTE(review): unused; the window bookkeeping looks
    # unfinished (ctix is reset to a window-relative index) -- verify
    # against buildCPGraph before ever switching to it.
    # in -xyz array as built by generator
    # out -[(childindex, parentindex)]
    # sti - start index, 2 for empty, len(me.vertices) for mesh
    sgarr = []
    sgarr.append((1, 0))
    ctix = 0
    for ai in range(sti, len(arr)):
        cs = arr[ai]
        # cpts = arr[0:ai]
        cpts = arr[ctix:ai]  # search only the (assumed-reachable) window
        cslap = getStencil3D_26(cs[0], cs[1], cs[2])
        for nc in cslap:
            ct = cpts.count(nc)
            if ct > 0:
                # cti = cpts.index(nc)
                cti = ctix + cpts.index(nc)  # convert back to absolute index
                ctix = cpts.index(nc)        # presumably meant to advance the window -- TODO confirm
                sgarr.append((ai, cti))
    return sgarr
def findChargePath(oc, fc, ngraph, restrict=None, partial=True):
    """Walk parent links from charge fc back to charge oc.

    oc: origin charge index; fc: final charge index.
    ngraph: (child, parent) node graph; restrict: indexes that must not be
    traversed; partial: if a restricted node is hit, return the partial
    path instead of [].
    Returns the path as a list of indexes (fc first), [] on failure, or
    None if the walk exhausts the graph without resolving (pre-existing).
    FIXES: restored the missing `return aRi` when the origin is found
    (the stray "# return path" comment marked where it was lost); replaced
    the mutable default argument `restrict=[]`.
    """
    if restrict is None:
        restrict = []
    cList = [n[0] for n in ngraph]  # children (inlined splitList)
    pList = [n[1] for n in ngraph]  # parents
    aRi = []
    cNODE = fc
    for x in range(len(ngraph)):
        pNODE = pList[cList.index(cNODE)]
        aRi.append(cNODE)
        cNODE = pNODE
        npNODECOUNT = cList.count(pNODE)
        if cNODE == oc:              # stop if origin found
            aRi.append(cNODE)
            return aRi               # return path
        if npNODECOUNT == 0:         # stop if no parents
            return []
        if pNODE in restrict:        # stop if parent is in restriction
            if partial:              # return partial or []
                aRi.append(cNODE)
                return aRi
            else:
                return []
def findTips(arr):
    """Return the child indexes in arr that never occur as a parent
    (i.e. branch tips).  The final element of arr is skipped, matching
    the original scan range."""
    parents = [pair[1] for pair in arr]
    tips = []
    for pair in arr[:-1]:
        child = pair[0]
        if parents.count(child) == 0:
            tips.append(child)
    return tips
def findChannelRoots(path, ngraph, restrict=[]):
    """Return the deduplicated parents on `path` that have at least one
    child leaving the path (and not in `restrict`) -- i.e. the points
    where secondary channels branch off."""
    roots = []
    for child, parent in ngraph:
        if parent in path and child not in path and child not in restrict:
            roots.append(parent)
    return deDupe(roots)
def findChannels(roots, tips, ngraph, restrict):
    # For every root, find the longest branching path to a tip and collect
    # it as a channel.  NOTE: mutates the caller's `tips` list (the chosen
    # tip is removed so later roots cannot reuse it).
    # roots/tips: index lists; ngraph: (child, parent) pairs;
    # restrict: indexes paths may not traverse.
    # Returns a list of paths (each a list of indexes, tip first).
    cPATHS = []
    for ri in range(len(roots)):
        r = roots[ri]
        sL = 1            # best (longest) path length seen so far
        sPATHi = []       # best path so far
        for ti in range(len(tips)):
            t = tips[ti]
            if t < r:     # tip indexes below the root cannot be descendants
                continue
            tPATHi = findChargePath(r, t, ngraph, restrict, False)
            tL = len(tPATHi)
            if tL > sL:
                # only accept paths that actually branch
                if countChildrenOnPath(tPATHi, ngraph) > 1:
                    sL = tL
                    sPATHi = tPATHi
                    tTEMP = t
                    tiTEMP = ti
        # tTEMP/tiTEMP are only bound when sPATHi was set, so this guard
        # also protects the debug print and the remove below
        if len(sPATHi) > 0:
            debug_print_vars(
                "\n[findChannels]\n",
                "found path/idex from", ri, 'of',
                len(roots), "possible | tips:", tTEMP, tiTEMP
            )
            cPATHS.append(sPATHi)
            tips.remove(tTEMP)
    return cPATHS
def findChannels_WORKINPROGRESS(roots, ttips, ngraph, restrict):
    # Experimental variant of findChannels: works on a copy of the tip
    # list (no caller-visible mutation) and removes every tip that yielded
    # any path, not just the winning one.  NOTE(review): unused; compares
    # `ti < ri` (tip position vs root position) where findChannels
    # compares `t < r` (index values) -- presumably unfinished, verify
    # before use.
    cPATHS = []
    tips = list(ttips)  # local copy, unlike findChannels
    for ri in range(len(roots)):
        r = roots[ri]
        sL = 1            # best path length so far
        sPATHi = []       # best path so far
        tipREMOVE = []  # checked tip indexes, to be removed for next loop
        for ti in range(len(tips)):
            t = tips[ti]
            if ti < ri:
                continue
            tPATHi = findChargePath(r, t, ngraph, restrict, False)
            tL = len(tPATHi)
            if tL > sL:
                if countChildrenOnPath(tPATHi, ngraph) > 1:
                    sL = tL
                    sPATHi = tPATHi
                    tTEMP = t
                    tiTEMP = ti
            if tL > 0:
                tipREMOVE.append(t)
        if len(sPATHi) > 0:
            debug_print_vars(
                "\n[findChannels_WORKINPROGRESS]\n",
                "found path from root idex", ri, 'of',
                len(roots), "possible roots | of tips= ", len(tips)
            )
            cPATHS.append(sPATHi)
        for q in tipREMOVE:
            tips.remove(q)
    return cPATHS
def countChildrenOnPath(aPath, ngraph, quick=True):
    """Count branch points on aPath: nodes that appear as a parent more
    than once in ngraph.  With quick=True (the default, used by all call
    sites) return the first such parent-count immediately.

    FIX: the non-quick path never incremented its counter and always
    returned 0; it now counts branch nodes.  (splitList inlined.)
    """
    cCOUNT = 0
    pList = [n[1] for n in ngraph]
    for ai in range(len(aPath) - 1):
        pc = pList.count(aPath[ai])
        if pc > 1:
            if quick:
                return pc
            cCOUNT += 1
    return cCOUNT
# classify channels into 'main', 'hORDER/secondary' and 'side'
def classifyStroke(sarr, mct, hORDER=1):
debug_prints(func="classifyStroke", text="Classifying stroke")
# build child/parent graph (indexes of sarr)
sgarr = buildCPGraph(sarr, mct)
# find main channel
debug_prints(func="classifyStroke", text="Finding MAIN")
oCharge = sgarr[0][1]
fCharge = sgarr[len(sgarr) - 1][0]
aMAINi = findChargePath(oCharge, fCharge, sgarr)
# find tips
debug_prints(func="classifyStroke", text="Finding TIPS")
aTIPSi = findTips(sgarr)
# find horder channel roots
# hcount = orders bewteen main and side/tips
# !!!still buggy!!!
hRESTRICT = list(aMAINi) # add to this after each time
allHPATHSi = [] # all ho paths: [[h0], [h1]...]
curPATHSi = [aMAINi] # list of paths find roots on
for h in range(hORDER):
allHPATHSi.append([])
for pi in range(len(curPATHSi)): # loop through all paths in this order
# get roots for this path
aHROOTSi = findChannelRoots(p, sgarr, hRESTRICT)
debug_print_vars(
"\n[classifyStroke]\n",
"found", len(aHROOTSi), "roots in ORDER", h, ":paths:", len(curPATHSi)
)
# get channels for these roots
if len(aHROOTSi) == 0:
debug_prints(func="classifyStroke", text="No roots for found for channel")
aHPATHSi = []
continue
else:
aHPATHSiD = findChannels(aHROOTSi, aTIPSi, sgarr, hRESTRICT)
aHPATHSi = aHPATHSiD
allHPATHSi[h] += aHPATHSi
# set these channels as restrictions for next iterations
for hri in aHPATHSi:
hRESTRICT += hri
curPATHSi = aHPATHSi
# side branches, final order of heirarchy
# from tips that are not in an existing path
# back to any other point that is already on a path
aDRAWNi = []
aDRAWNi += aMAINi
for oH in allHPATHSi:
for o in oH:
aDRAWNi += o
aTPATHSi = []
for a in aTIPSi:
if a not in aDRAWNi:
aPATHi = findChargePath(oCharge, a, sgarr, aDRAWNi)
aDRAWNi += aPATHi
aTPATHSi.append(aPATHi)
return aMAINi, allHPATHSi, aTPATHSi
def voxelByVertex(ob, gs):
    """Quantize every vertex of ob's mesh to integer grid cells of size gs,
    relative to the object's own origin.  Returns [(x, y, z), ...]."""
    cells = []
    for v in ob.data.vertices:
        co = v.co
        cells.append((int(co.x / gs), int(co.y / gs), int(co.z / gs)))
    return cells
def voxelByRays(ob, orig, gs):
    """Voxelize mesh object ob by raycasting along all three axes.

    Returns grid cells [(x, y, z), ...] relative to grid scale gs and bolt
    origin orig, padded with their 26-neighborhoods so the bolt cannot
    slip between surface samples.  Does not account for object
    rotation/scale (upstream caveat), and is admittedly brute-force.
    FIX: the hit list `ll` was never initialized, so the first ray hit
    raised NameError.
    """
    bbox = ob.bound_box
    bbxL = bbox[0][0]
    bbxR = bbox[4][0]
    bbyL = bbox[0][1]
    bbyR = bbox[2][1]
    bbzL = bbox[0][2]
    bbzR = bbox[1][2]
    xct = int((bbxR - bbxL) / gs)
    yct = int((bbyR - bbyL) / gs)
    zct = int((bbzR - bbzL) / gs)
    xs = int(xct / 2)
    ys = int(yct / 2)
    zs = int(zct / 2)
    debug_print_vars(
        "\n[voxelByRays]\n",
        "Casting", xct, '/', yct, '/', zct, 'cells, total:',
        xct * yct * zct, 'in obj-', ob.name
    )
    ll = []   # hit cells (was missing entirely)
    rc = 100  # distance to cast from
    # raycast top/bottom
    debug_prints(func="voxelByRays", text="Raycasting top/bottom")
    for x in range(xct):
        for y in range(yct):
            xco = bbxL + (x * gs)
            yco = bbyL + (y * gs)
            v1 = ((xco, yco, rc))
            v2 = ((xco, yco, -rc))
            vz1 = ob.ray_cast(v1, v2)
            vz2 = ob.ray_cast(v2, v1)
            debug_print_vars(
                "\n[voxelByRays]\n", "vz1 is: ", vz1, "\nvz2 is: ", vz2
            )
            # Note: the API raycast return has changed now it is
            # (result, location, normal, index) - result is a boolean
            if vz1[0] is True:
                ll.append((x - xs, y - ys, int(vz1[1][2] * (1 / gs))))
            if vz2[0] is True:
                ll.append((x - xs, y - ys, int(vz2[1][2] * (1 / gs))))
    # raycast front/back
    debug_prints(func="voxelByRays", text="Raycasting front/back")
    for x in range(xct):
        for z in range(zct):
            xco = bbxL + (x * gs)
            zco = bbzL + (z * gs)
            v1 = ((xco, rc, zco))
            v2 = ((xco, -rc, zco))
            vy1 = ob.ray_cast(v1, v2)
            vy2 = ob.ray_cast(v2, v1)
            if vy1[0] is True:
                ll.append((x - xs, int(vy1[1][1] * (1 / gs)), z - zs))
            if vy2[0] is True:
                ll.append((x - xs, int(vy2[1][1] * (1 / gs)), z - zs))
    # raycast left/right
    debug_prints(func="voxelByRays", text="Raycasting left/right")
    for y in range(yct):
        for z in range(zct):
            yco = bbyL + (y * gs)
            zco = bbzL + (z * gs)
            v1 = ((rc, yco, zco))
            v2 = ((-rc, yco, zco))
            vx1 = ob.ray_cast(v1, v2)
            vx2 = ob.ray_cast(v2, v1)
            if vx1[0] is True:
                ll.append((int(vx1[1][0] * (1 / gs)), y - ys, z - zs))
            if vx2[0] is True:
                ll.append((int(vx2[1][0] * (1 / gs)), y - ys, z - zs))
    # add in neighbors so bolt wont go through
    nlist = []
    for l in ll:
        nl = getStencil3D_26(l[0], l[1], l[2])
        nlist += nl
    # dedupe
    debug_prints(func="voxelByRays", text="Added neighbors, deduping...")
    rlist = deDupe(ll + nlist)
    qlist = []
    # relocate grid w/ respect gscale and bolt origin
    # !!!need to add in obj rot/scale here somehow...
    od = Vector(
        ((ob.location[0] - orig[0]) / gs,
         (ob.location[1] - orig[1]) / gs,
         (ob.location[2] - orig[2]) / gs)
    )
    for r in rlist:
        qlist.append((r[0] + int(od[0]), r[1] + int(od[1]), r[2] + int(od[2])))
    return qlist
def fakeGroundChargePlane(z, charge):
    """Approximate a ground plane at height z with five charge points:
    one at the axis and four offset by abs(z)/2 along +/-x and +/-y."""
    xy = abs(z) / 2
    offsets = [(0, 0), (xy, 0), (0, xy), (-xy, 0), (0, -xy)]
    return [(dx, dy, z, charge) for dx, dy in offsets]
def addCharges(ll, charge):
    """Append the same charge w to every coordinate triple.

    In:  ll - [(x, y, z), ...], charge - w
    Out: [(x, y, z, w), ...]
    (Comprehension replaces the manual append loop.)
    """
    return [(l[0], l[1], l[2], charge) for l in ll]
# algorithm functions #
# from fslg #
def getGrowthProbability_KEEPFORREFERENCE(uN, aList):
    # Reference implementation of FSLG growth probability (eqns. 12/13),
    # kept only for documentation -- DO NOT CALL.
    # NOTE(review): this version is broken as written: `o` is used before
    # assignment in the Uj line (NameError), E is never accumulated (the
    # summation loop for the eqn. 12 divisor appears lost), and there is
    # no return statement.  See getGrowthProbability below for the live
    # version.
    # in: un -user term, clist -candidate sites, olist -candidate site charges
    # out: list of [(xyz), pot, prob]
    cList = splitList(aList, 0)
    oList = splitList(aList, 1)
    Omin, Omax = getLowHigh(oList)
    # avoid a zero-width range (division by zero below)
    if Omin == Omax:
        Omax += notZero
        Omin -= notZero
    PdL = []
    E = 0
    E = notZero  # divisor for (fslg - eqn. 12)
    Uj = (o - Omin) / (Omax - Omin)  # (fslg - eqn. 13)
    for oi in range(len(oList)):
        o = oList[oi]
        Ui = (o - Omin) / (Omax - Omin)
        Pd = (pow(Ui, uN)) / E  # (fslg - eqn. 12)
        PdINT = Pd * 100
        PdL.append(Pd)
# work in progress, trying to speed these up
def fslg_e13(x, min, max, u):
    # FSLG eqn. 13: normalize x into [min, max], raise to user exponent u.
    normalized = (x - min) / (max - min)
    return pow(normalized, u)
def addit(x, y):
    # Trivial addition helper (part of the speed-up experiments above).
    return x + y
def fslg_e12(x, min, max, u, e):
    # FSLG eqn. 12: normalized potential over divisor e, scaled to percent.
    numerator = fslg_e13(x, min, max, u)
    return (numerator / e) * 100
def getGrowthProbability(uN, aList):
# In: uN - user_term, cList - candidate sites, oList - candidate site charges
# Out: list of prob