Skip to content
Snippets Groups Projects
Commit fad5186b authored by Maurice Raybaud's avatar Maurice Raybaud
Browse files

Formatting and fixes

* Moved: some existing functions into new separate files to improve code
readability (detailed in __init__.py docstring)

* Remove: max_intersections deprecated in pov 3.8

* Add: Validate utf-8 characters with specific API function at session's
first script init

* Add : Icons to some text fields and inviting labels

* Change default camera normal perturbation value to non zero since its
use is first driven by a boolean toggle

* Change: lists (vectors and indices) are now exported in one line by
default for better manual scene overview and debugging

* Change: a couple of tooltips corrections

* Change : renamed many variables and functions to snake_case according
to recommended style guides

* Fix : Heightfield primitive (forward slashes were expected for
displacement texture path)

* Fix : Text snippet insertion operator

* Fix : added console print tip to check executable path on failure to
process

* Fix : tweaked finished render say command for Linux

* Fix : interface of some shader nodes broken since 2.8 api changes

* Fix : export hair particles
parent 051d4f7d
No related branches found
No related tags found
No related merge requests found
Showing
with 6302 additions and 11419 deletions
......@@ -9,8 +9,8 @@ lampdata = bpy.context.object.data
lampdata.shape = 'SQUARE'
lampdata.size = 30000000#0.02
#lampdata.size_y = 0.02
lampdata.shadow_ray_samples_x = 2
#lampdata.shadow_ray_samples_y = 3
lampdata.pov.shadow_ray_samples_x = 2
#lampdata.pov.shadow_ray_samples_y = 3
lampdata.color = (1.0, 1.0, 1.0)
lampdata.energy = 1.094316#91193 #lux
lampdata.distance =695699968
......@@ -6,8 +6,8 @@ lampdata = bpy.context.object.data
lampdata.size = 1.2
lampdata.size_y = 2.10
lampdata.shadow_ray_samples_x = 2
lampdata.shadow_ray_samples_y = 3
lampdata.pov.shadow_ray_samples_x = 2
lampdata.pov.shadow_ray_samples_y = 3
lampdata.color = (1.0, 1.0, 1.0)
lampdata.energy = 1.094316#91193 #lux
lampdata.distance = 1.0
......@@ -6,8 +6,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.038
lampdata.size_y = 2.40284
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (1.0, 0.95686274766922, 0.9490200281143188)
lampdata.energy = 4.45304#4775lm/21.446(=lux)*0.004(distance) *2 for distance is the point of half strength 6200lm?
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......
......@@ -6,8 +6,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.038
lampdata.size_y = 1.2192
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (0.901, 1.0, 0.979)
lampdata.energy = 2.14492#2300lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......
......@@ -6,8 +6,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.026
lampdata.size_y = 0.59
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (0.95686274766922, 1.0, 0.9803921580314636)
lampdata.energy = 1.25898#1350lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......@@ -7,8 +7,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.026
lampdata.size_y = 0.59
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (0.8313725590705872, 0.9215686321258545, 1.0)
lampdata.energy = 1.25898#1350lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......
......@@ -7,8 +7,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.026
lampdata.size_y = 0.59
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (1.0, 0.95686274766922, 0.8980392217636108)
lampdata.energy = 1.25898#1350lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......@@ -6,8 +6,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.016
lampdata.size_y = 1.149
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 1
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (1.0, 0.83, 0.986274528503418)
lampdata.energy = 4.66287 #0.93257#4.66287#5000lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 0.1 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
......@@ -7,8 +7,8 @@ lampdata = bpy.context.object.data
lampdata.size = 0.033
lampdata.size_y = 0.133
lampdata.shadow_ray_samples_x = 2
lampdata.shadow_ray_samples_y = 2
lampdata.pov.shadow_ray_samples_x = 2
lampdata.pov.shadow_ray_samples_y = 2
lampdata.color = (1.0, 0.8292156958580017, 0.6966666865348816)
lampdata.energy = 0.83932#900lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.18 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
This diff is collapsed.
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""User interface imports and preferences for the addon."""
# import addon_utils
# from time import sleep
import bpy
from bpy.app.handlers import persistent
# from bpy.utils import register_class, unregister_class
# from bpy.types import (
# Operator,
# Menu,
# UIList,
# Panel,
# Brush,
# Material,
# Light,
# World,
# ParticleSettings,
# FreestyleLineStyle,
# )
# from bl_operators.presets import AddPresetBase
from . import (
render_gui,
scenography_gui,
object_gui,
shading_gui,
texturing_gui,
shading_nodes, # for POV specific nodes
scripting_gui,
)
############# POV-Centric WORKSPACE #############
@persistent
def povCentricWorkspace(dummy):
    """Set up a POV centric Workspace if addon was activated and saved as default renderer.

    Running this directly at register time would raise a '_RestrictData' error
    because the UI needs to be fully loaded before workspace changes, so this
    function is registered in bpy.app.handlers instead. By default handlers are
    freed when loading new files, but here we want the handler to stay running
    across multiple files as part of this add-on. That is why the
    bpy.app.handlers.persistent decorator is used (@persistent) above.
    """
    # Scripting workspace may have been altered from factory though, so should
    # we put all within a Try... Except AttributeErrors ? Any better solution ?
    # Should it simply not run when opening existing file? be a preferences operator to create
    # Moray like workspace
    if 'Scripting' in bpy.data.workspaces:
        wsp = bpy.data.workspaces.get('Scripting')
        context = bpy.context
        # Only build the POV layout when POV is the scene's active render engine.
        if context.scene.render.engine == 'POVRAY_RENDER' and wsp is not None:
            # Clone the factory 'Scripting' workspace and rename the copy 'POV'.
            bpy.ops.workspace.duplicate({'workspace': wsp})
            bpy.data.workspaces['Scripting.001'].name = 'POV'
            # Already done it would seem, but explicitly make this workspace the active one
            context.window.workspace = bpy.data.workspaces['POV']
            pov_screen = bpy.data.workspaces['POV'].screens[0]
            pov_workspace = pov_screen.areas
            pov_window = context.window
            try:
                # Already outliners but invert both types
                pov_workspace[1].spaces[0].display_mode = 'LIBRARIES'
                pov_workspace[3].spaces[0].display_mode = 'VIEW_LAYER'
            except AttributeError:
                # But not necessarily outliners in existing blend files
                pass
            override = bpy.context.copy()
            # Walk every area of the cloned screen and cycle its editor to the
            # type wanted in the POV layout (3D view <-> text editor,
            # console <-> info). A second if/elif chain in the same iteration
            # then post-processes areas that have just become a 3D view.
            for area in pov_workspace:
                if area.type == 'VIEW_3D':
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        for space in area.spaces:
                            if space.type == 'VIEW_3D':
                                # override['screen'] = pov_screen
                                override['area'] = area
                                override['region'] = region
                                # bpy.data.workspaces['POV'].screens[0].areas[6].spaces[0].width = 333 # Read only,
                                # how do we set ?
                                # This has a glitch:
                                # bpy.ops.screen.area_move(override, x=(area.x + area.width), y=(area.y + 5), delta=100)
                                # bpy.ops.screen.area_move(override, x=(area.x + 5), y=area.y, delta=-100)
                                bpy.ops.screen.space_type_set_or_cycle(
                                    override, space_type='TEXT_EDITOR'
                                )
                                space.show_region_ui = True
                                # bpy.ops.screen.region_scale(override)
                                # bpy.ops.screen.region_scale()
                                break
                elif area.type == 'CONSOLE':
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        for space in area.spaces:
                            if space.type == 'CONSOLE':
                                override['screen'] = pov_screen
                                override['window'] = pov_window
                                override['area'] = area
                                override['region'] = region
                                # Midpoint of the area's top edge, kept for the
                                # (disabled) area_move experiment below.
                                area_x = area.x + (area.width / 2)
                                area_y = area.y + area.height
                                bpy.ops.screen.space_type_set_or_cycle(override, space_type='INFO')
                                try:
                                    if area == pov_workspace[6] and bpy.ops.screen.area_move.poll(
                                        override
                                    ):
                                        # bpy.ops.screen.area_move(override, x = area_x, y = area_y, delta = -300)
                                        pass
                                    # pov_window.cursor_warp(area_x, area_y-300) # Is manual move emulation necessary
                                    # despite the delta?
                                except IndexError:
                                    # Not necessarily so many areas in existing blend files
                                    pass
                                break
                elif area.type == 'INFO':
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        for space in area.spaces:
                            if space.type == 'INFO':
                                # override['screen'] = pov_screen
                                override['area'] = area
                                override['region'] = region
                                bpy.ops.screen.space_type_set_or_cycle(
                                    override, space_type='CONSOLE'
                                )
                                break
                elif area.type == 'TEXT_EDITOR':
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        for space in area.spaces:
                            if space.type == 'TEXT_EDITOR':
                                # override['screen'] = pov_screen
                                override['area'] = area
                                override['region'] = region
                                # bpy.ops.screen.space_type_set_or_cycle(space_type='VIEW_3D')
                                # space.type = 'VIEW_3D'
                                bpy.ops.screen.space_type_set_or_cycle(
                                    override, space_type='VIEW_3D'
                                )
                                # bpy.ops.screen.area_join(override, cursor=(area.x, area.y + area.height))
                                break
                # Areas cycled to VIEW_3D just above are matched here within the
                # same iteration: set quadview and a camera perspective.
                if area.type == 'VIEW_3D':
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        for space in area.spaces:
                            if space.type == 'VIEW_3D':
                                # override['screen'] = pov_screen
                                override['area'] = area
                                override['region'] = region
                                bpy.ops.screen.region_quadview(override)
                                space.region_3d.view_perspective = 'CAMERA'
                                # bpy.ops.screen.space_type_set_or_cycle(override, space_type = 'TEXT_EDITOR')
                                # bpy.ops.screen.region_quadview(override)
                elif area.type == 'OUTLINER':
                    # Only headers not at the area's bottom edge (non-zero r.y offset).
                    for region in [
                        r for r in area.regions if r.type == 'HEADER' and (r.y - area.y)
                    ]:
                        for space in area.spaces:
                            if space.display_mode == 'LIBRARIES':
                                override['area'] = area
                                override['region'] = region
                                override['window'] = pov_window
                                bpy.ops.screen.region_flip(override)
            bpy.data.workspaces.update()
            '''
            for window in bpy.context.window_manager.windows:
                for area in [a for a in window.screen.areas if a.type == 'VIEW_3D']:
                    for region in [r for r in area.regions if r.type == 'WINDOW']:
                        context_override = {
                            'window': window,
                            'screen': window.screen,
                            'area': area,
                            'region': region,
                            'space_data': area.spaces.active,
                            'scene': bpy.context.scene
                        }
                        bpy.ops.view3d.camera_to_view(context_override)
            '''
        else:
            print(
                "\nPOV centric workspace available if you set render option\n"
                "and save it in default file with CTRL+U"
            )
    else:
        print(
            "\nThe factory 'Scripting' workspace is needed before POV centric "
            "\nworkspace may activate when POV is set as your default renderer"
        )
#################################### UTF-8 ###################################
# Check and fix all strings in current .blend file to be valid UTF-8 Unicode
# sometimes needed for old, 2.4x / 2.6x era files
# NOTE(review): this executes as a module-level side effect the first time the
# add-on is imported — confirm it is intended here rather than in register().
bpy.ops.wm.blend_strings_utf8_validate()
def check_material(mat):
    """Return True when *mat* is editable through the classic material
    property buttons, i.e. it exists and is not driven by a node tree."""
    if mat is None:
        return False
    if mat.use_nodes:
        # A node-based material only qualifies while its tree is still empty.
        # FORMERLY : #mat.active_node_material is not None:
        return not mat.node_tree
    return True
def simple_material(mat):
    """Return True for an existing material that does not use nodes."""
    return mat is not None and not mat.use_nodes
def pov_context_tex_datablock(context):
    """Recreate texture context type as deprecated in blender 2.8.

    Return the ID datablock matching the scene's active texture context
    (brush, material, world, light, particle settings or line style), or
    None when nothing matches.
    """
    tex_ctx = context.scene.texture_context

    block = context.brush
    if block and tex_ctx == 'OTHER':
        return block

    # idblock = bpy.context.active_object.active_material
    block = context.view_layer.objects.active.active_material
    if block and tex_ctx == 'MATERIAL':
        return block

    block = context.scene.world
    if block and tex_ctx == 'WORLD':
        return block

    block = context.light
    if block and tex_ctx == 'LIGHT':
        return block

    if context.particle_system and tex_ctx == 'PARTICLES':
        return context.particle_system.settings

    block = context.line_style
    if block and tex_ctx == 'LINESTYLE':
        return block
# class TextureTypePanel(TextureButtonsPanel):
# @classmethod
# def poll(cls, context):
# tex = context.texture
# engine = context.scene.render.engine
# return tex and ((tex.type == cls.tex_type and not tex.use_nodes) and (engine in cls.COMPAT_ENGINES))
def register():
    """Register all GUI sub-modules and install the workspace handler.

    The povCentricWorkspace handler is appended to load_post (not run
    directly) because the workspace can only be changed once the UI is
    fully loaded; the membership guard prevents double registration when
    the add-on is toggled off and on again.
    """
    render_gui.register()
    scenography_gui.register()
    object_gui.register()
    shading_gui.register()
    texturing_gui.register()
    shading_nodes.register()
    scripting_gui.register()
    if povCentricWorkspace not in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.append(povCentricWorkspace)
def unregister():
    """Remove the workspace handler and unregister sub-modules in reverse order."""
    if povCentricWorkspace in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.remove(povCentricWorkspace)
    scripting_gui.unregister()
    shading_nodes.unregister()
    texturing_gui.unregister()
    shading_gui.unregister()
    object_gui.unregister()
    scenography_gui.unregister()
    # BUG FIX: this previously called render_gui.register(), re-registering
    # the render GUI classes on add-on disable instead of removing them.
    render_gui.unregister()
......@@ -52,18 +52,19 @@ import sys
# -+-+-+- Start df3 Class -+-+-+-
class df3:
__version__ = '0.2'
__arraytype__ = 'f'
__struct4byte__ = '>I'
__struct2byte__ = '>H'
__struct4byte__ = '>I'
__struct2byte__ = '>H'
__struct2byte3__ = '>HHH'
__struct1byte__ = '>B'
__array4byte__ = 'I'
__array2byte__ = 'H'
__array1byte__ = 'B'
__struct1byte__ = '>B'
__array4byte__ = 'I'
__array2byte__ = 'H'
__array1byte__ = 'B'
def __init__(self, x=1, y=1, z=1):
self.maxX = x
......@@ -73,7 +74,7 @@ class df3:
def clone(self, indf3):
self.voxel = array.array(self.__arraytype__)
for i in range(indf3.sizeX()*indf3.sizeY()*indf3.sizeZ()):
for i in range(indf3.sizeX() * indf3.sizeY() * indf3.sizeZ()):
self.voxel[i] = indf3.voxel[i]
return self
......@@ -98,35 +99,41 @@ class df3:
#### Voxel Access Functions
def get(self, x, y, z):
return self.voxel[self.__voxa__(x,y,z)]
return self.voxel[self.__voxa__(x, y, z)]
def getB(self, x, y, z):
if (x > self.sizeX() or x < 0): return 0
if (y > self.sizeX() or y < 0): return 0
if (z > self.sizeX() or z < 0): return 0
if x > self.sizeX() or x < 0:
return 0
if y > self.sizeX() or y < 0:
return 0
if z > self.sizeX() or z < 0:
return 0
return self.voxel[self.__voxa__(x,y,z)]
return self.voxel[self.__voxa__(x, y, z)]
def set(self, x, y, z, val):
self.voxel[self.__voxa__(x,y,z)] = val
self.voxel[self.__voxa__(x, y, z)] = val
def setB(self, x, y, z, val):
if (x > self.sizeX() or x < 0): return
if (y > self.sizeX() or y < 0): return
if (z > self.sizeX() or z < 0): return
if x > self.sizeX() or x < 0:
return
if y > self.sizeX() or y < 0:
return
if z > self.sizeX() or z < 0:
return
self.voxel[self.__voxa__(x,y,z)] = val
self.voxel[self.__voxa__(x, y, z)] = val
#### Scalar Functions
def mult(self, val):
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
self.voxel[i] = self.voxel[i] * val
return self
def add(self, val):
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
self.voxel[i] = self.voxel[i] + val
return self
......@@ -134,8 +141,8 @@ class df3:
def max(self):
tmp = self.voxel[0]
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
if (self.voxel[i] > tmp):
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
if self.voxel[i] > tmp:
tmp = self.voxel[i]
return tmp
......@@ -143,8 +150,8 @@ class df3:
def min(self):
tmp = self.voxel[0]
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
if (self.voxel[i] < tmp):
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
if self.voxel[i] < tmp:
tmp = self.voxel[i]
return tmp
......@@ -152,30 +159,31 @@ class df3:
#### Vector Functions
def compare(self, indf3):
if (self.__samesize__(indf3) == 0): return 0
if self.__samesize__(indf3) == 0:
return 0
if (self.voxel == indf3.voxel):
if self.voxel == indf3.voxel:
return 1
return 0
def multV(self, indf3):
if (self.__samesize__(indf3) == 0):
if self.__samesize__(indf3) == 0:
print("Cannot multiply voxels - not same size")
return
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
self.voxel[i] = self.voxel[i]*indf3.voxel[i]
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
self.voxel[i] = self.voxel[i] * indf3.voxel[i]
return self
def addV(self, indf3):
if (self.__samesize__(indf3) == 0):
if self.__samesize__(indf3) == 0:
print("Cannot add voxels - not same size")
return
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
self.voxel[i] = self.voxel[i]+indf3.voxel[i]
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
self.voxel[i] = self.voxel[i] + indf3.voxel[i]
return self
......@@ -183,31 +191,31 @@ class df3:
fx = filt.sizeX()
fy = filt.sizeY()
fz = filt.sizeZ()
if (fx % 2 != 1):
if fx % 2 != 1:
print("Incompatible filter - must be odd number of X")
return self
if (fy % 2 != 1):
if fy % 2 != 1:
print("Incompatible filter - must be odd number of Y")
return self
if (fz % 2 != 1):
if fz % 2 != 1:
print("Incompatible filter - must be odd number of Z")
return self
fdx = (fx-1)/2
fdy = (fy-1)/2
fdz = (fz-1)/2
flen = fx*fy*fz
fdx = (fx - 1) / 2
fdy = (fy - 1) / 2
fdz = (fz - 1) / 2
flen = fx * fy * fz
newV = self.__create__(self.sizeX(), self.sizeY(), self.sizeZ());
newV = self.__create__(self.sizeX(), self.sizeY(), self.sizeZ())
for x in range(self.sizeX()):
for y in range(self.sizeY()):
for z in range(self.sizeZ()):
rip = self.__rip__(x-fdx, x+fdx, y-fdy, y+fdy, z-fdz, z+fdz)
rip = self.__rip__(x - fdx, x + fdx, y - fdy, y + fdy, z - fdz, z + fdz)
tmp = 0.0
for i in range(flen):
tmp += rip[i]*filt.voxel[i]
newV[self.__voxa__(x,y,z)] = tmp
tmp += rip[i] * filt.voxel[i]
newV[self.__voxa__(x, y, z)] = tmp
self.voxel = newV
......@@ -221,64 +229,67 @@ class df3:
z = self.sizeZ()
try:
f = open(file, 'wb');
f = open(file, 'wb')
except:
print("Could not open " + file + " for write");
print("Could not open " + file + " for write")
return
f.write(struct.pack(self.__struct2byte3__, x, y, z));
f.write(struct.pack(self.__struct2byte3__, x, y, z))
tmp = self.__toInteger__(pow(2,depth)-1, rescale)
tmp = self.__toInteger__(pow(2, depth) - 1, rescale)
if (depth > 16): # 32-bit
for i in range( x*y*z ):
if depth > 16: # 32-bit
for i in range(x * y * z):
f.write(struct.pack(self.__struct4byte__, tmp[i]))
elif (depth > 8): # 16-bit
for i in range( x*y*z ):
elif depth > 8: # 16-bit
for i in range(x * y * z):
f.write(struct.pack(self.__struct2byte__, tmp[i]))
else:
for i in range( x*y*z ):
for i in range(x * y * z):
f.write(struct.pack(self.__struct1byte__, tmp[i]))
def importDF3(self, file, scale=1):
try:
f = open(file, 'rb');
f = open(file, 'rb')
size = os.stat(file)[stat.ST_SIZE]
except:
print("Could not open " + file + " for read");
print("Could not open " + file + " for read")
return []
(x, y, z) = struct.unpack(self.__struct2byte3__, f.read(6) )
(x, y, z) = struct.unpack(self.__struct2byte3__, f.read(6))
self.voxel = self.__create__(x, y, z)
self.maxX = x
self.maxY = y
self.maxZ = z
size = size-6
if (size == x*y*z): format = 8
elif (size == 2*x*y*z): format = 16
elif (size == 4*x*y*z): format = 32
if (format == 32):
for i in range(x*y*z):
self.voxel[i] = float(struct.unpack(self.__struct4byte__, f.read(4) )[0])
elif (format == 16):
for i in range(x*y*z):
self.voxel[i] = float(struct.unpack(self.__struct2byte__, f.read(2) )[0])
elif (format == 8):
for i in range(x*y*z):
self.voxel[i] = float(struct.unpack(self.__struct1byte__, f.read(1) )[0])
size = size - 6
if size == x * y * z:
format = 8
elif size == 2 * x * y * z:
format = 16
elif size == 4 * x * y * z:
format = 32
if format == 32:
for i in range(x * y * z):
self.voxel[i] = float(struct.unpack(self.__struct4byte__, f.read(4))[0])
elif format == 16:
for i in range(x * y * z):
self.voxel[i] = float(struct.unpack(self.__struct2byte__, f.read(2))[0])
elif format == 8:
for i in range(x * y * z):
self.voxel[i] = float(struct.unpack(self.__struct1byte__, f.read(1))[0])
return self
#### Local classes not intended for user use
def __rip__(self, minX, maxX, minY, maxY, minZ, maxZ):
sizeX = maxX-minX+1
sizeY = maxY-minY+1
sizeZ = maxZ-minZ+1
sizeX = maxX - minX + 1
sizeY = maxY - minY + 1
sizeZ = maxZ - minZ + 1
tmpV = self.__create__(sizeX, sizeY, sizeZ)
......@@ -286,95 +297,99 @@ class df3:
for y in range(sizeY):
for z in range(sizeZ):
# Check X
if ((minX + x) < 0):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
elif ((minX + x) > self.sizeX()-1):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
if (minX + x) < 0:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
elif (minX + x) > self.sizeX() - 1:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
# Check Y
elif ((minY + y) < 0):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
elif ((minY + y) > self.sizeY()-1):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
elif (minY + y) < 0:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
elif (minY + y) > self.sizeY() - 1:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
# Check Z
elif ((minZ + z) < 0):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
elif ((minZ + z) > self.sizeZ()-1):
tmpV[(z*sizeZ+y)*sizeY+x] = 0.0
elif (minZ + z) < 0:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
elif (minZ + z) > self.sizeZ() - 1:
tmpV[(z * sizeZ + y) * sizeY + x] = 0.0
else:
tmpV[(z*sizeZ+y)*sizeY+x] = self.get(minX+x,minY+y,minZ+z)
tmpV[(z * sizeZ + y) * sizeY + x] = self.get(minX + x, minY + y, minZ + z)
return tmpV
def __samesize__(self, indf3):
if (self.sizeX() != indf3.sizeX()): return 0
if (self.sizeY() != indf3.sizeY()): return 0
if (self.sizeZ() != indf3.sizeZ()): return 0
if self.sizeX() != indf3.sizeX():
return 0
if self.sizeY() != indf3.sizeY():
return 0
if self.sizeZ() != indf3.sizeZ():
return 0
return 1
def __voxa__(self, x, y, z):
return ((z*self.sizeY()+y)*self.sizeX()+x)
return (z * self.sizeY() + y) * self.sizeX() + x
def __create__(self, x, y, z, atype='0', init=1):
if (atype == '0'):
if atype == '0':
tmp = self.__arraytype__
else:
tmp = atype
if (init == 1):
if tmp in ('f','d'):
voxel = array.array(tmp, [0.0 for i in range(x*y*z)])
if init == 1:
if tmp in ('f', 'd'):
voxel = array.array(tmp, [0.0 for i in range(x * y * z)])
else:
voxel = array.array(tmp, [0 for i in range(x*y*z)])
voxel = array.array(tmp, [0 for i in range(x * y * z)])
else:
voxel = array.array(tmp)
return voxel
def __toInteger__(self, scale, rescale=1):
if (scale < pow(2,8)): # 8-bit
if scale < pow(2, 8): # 8-bit
tmp = self.__create__(self.sizeX(), self.sizeY(), self.sizeZ(), self.__array1byte__)
elif (scale < pow(2,16)): # 16-bit
elif scale < pow(2, 16): # 16-bit
tmp = self.__create__(self.sizeX(), self.sizeY(), self.sizeZ(), self.__array2byte__)
else: # 32-bit
else: # 32-bit
tmp = self.__create__(self.sizeX(), self.sizeY(), self.sizeZ(), self.__array4byte__)
maxVal = self.max()
print(scale)
for i in range(self.sizeX()*self.sizeY()*self.sizeZ()):
if (rescale == 1):
tmp[i] = max(0,int(round(scale*self.voxel[i]/maxVal)))
for i in range(self.sizeX() * self.sizeY() * self.sizeZ()):
if rescale == 1:
tmp[i] = max(0, int(round(scale * self.voxel[i] / maxVal)))
else:
tmp[i] = max(0,min(scale,int(round(self.voxel[i]))))
tmp[i] = max(0, min(scale, int(round(self.voxel[i]))))
return tmp
# -=-=-=- End df3 Class -=-=-=-
##########DEFAULT EXAMPLES
# if __name__ == '__main__':
# localX = 80
# localY = 90
# localZ = 100
## Generate an output
# temp = df3(localX, localY, localZ)
# for i in range(localX):
# for j in range(localY):
# for k in range(localZ):
# if (i >= (localX/2)):
# temp.set(i, j, k, 1.0)
# temp.exportDF3('temp.df3', 16)
# localX = 80
# localY = 90
# localZ = 100
## Generate an output
# temp = df3(localX, localY, localZ)
# for i in range(localX):
# for j in range(localY):
# for k in range(localZ):
# if (i >= (localX/2)):
# temp.set(i, j, k, 1.0)
# temp.exportDF3('temp.df3', 16)
###############################################################################
## Import
# temp2 = df3().importDF3('temp.df3')
# temp2.mult(1/temp2.max())
## Import
# temp2 = df3().importDF3('temp.df3')
# temp2.mult(1/temp2.max())
## Compare
# print(temp2.size())
## Compare
# print(temp2.size())
# if (temp.compare(temp2) == 0): print("DF3's Do Not Match")
# if (temp.compare(temp2) == 0): print("DF3's Do Not Match")
###############################################################################
# ChangeLog
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""Get some Blender particle objects translated to POV."""
import random

import bpy
def export_hair(file, ob, p_sys, global_matrix, write_matrix):
    """Get Blender path particles (hair strands) objects translated to POV sphere_sweep unions.

    :param file: open writable text file the POV scene is streamed to.
    :param ob: emitter object owning the hair particle system.
    :param p_sys: the hair particle system to export.
    :param global_matrix: scene-wide transform applied on top of the object matrix.
    :param write_matrix: callback writing a matrix in POV syntax to *file*.
    """
    # tstart = time.time()
    textured_hair = 0
    # Defensive default so the per-strand pigment write below can never hit an
    # unbound name even if texture slots change between the two scans.
    init_color = (1.0, 1.0, 1.0, 0.0)
    if ob.material_slots[p_sys.settings.material - 1].material and ob.active_material is not None:
        pmaterial = ob.material_slots[p_sys.settings.material - 1].material
        # XXX Todo: replace by pov_(Particles?)_texture_slot
        for th in pmaterial.pov_texture_slots:
            povtex = th.texture  # slot.name
            tex = bpy.data.textures[povtex]
            if th and th.use:
                if (tex.type == 'IMAGE' and tex.image) or tex.type != 'IMAGE':
                    if th.use_map_color_diffuse:
                        textured_hair = 1
        if pmaterial.strand.use_blender_units:
            strand_start = pmaterial.strand.root_size
            strand_end = pmaterial.strand.tip_size
            strand_shape = pmaterial.strand.shape
        else:  # Blender unit conversion
            strand_start = pmaterial.strand.root_size / 200.0
            strand_end = pmaterial.strand.tip_size / 200.0
            strand_shape = pmaterial.strand.shape
    else:
        # NOTE(review): pmaterial stays a plain string here, yet the default
        # HairTexture branch below reads pmaterial.diffuse_color — that path
        # would raise AttributeError; confirm callers always supply a material.
        pmaterial = "default"  # No material assigned in blender, use default one
        strand_start = 0.01
        strand_end = 0.01
        strand_shape = 0.0
    # Set the number of particles to render count rather than 3d view display
    # p_sys.set_resolution(scene, ob, 'RENDER') # DEPRECATED
    # When you render, the entire dependency graph will be
    # evaluated at render resolution, including the particles.
    # In the viewport it will be at viewport resolution.
    # So there is no need for render engines to use this function anymore,
    # it's automatic now.
    steps = p_sys.settings.display_step
    steps = 2 ** steps  # or + 1 # Formerly : len(particle.hair_keys)
    total_number_of_strands = p_sys.settings.count + p_sys.settings.rendered_child_count
    # hairCounter = 0
    file.write('#declare HairArray = array[%i] {\n' % total_number_of_strands)
    for pindex in range(0, total_number_of_strands):
        # if particle.is_exist and particle.is_visible:
        # hairCounter += 1
        # controlPointCounter = 0
        # Each hair is represented as a separate sphere_sweep in POV-Ray.
        file.write('sphere_sweep{')
        if p_sys.settings.use_hair_bspline:
            file.write('b_spline ')
            file.write(
                '%i,\n' % (steps + 2)
            )  # +2 because the first point needs tripling to be more than a handle in POV
        else:
            file.write('linear_spline ')
            file.write('%i,\n' % (steps))
        # changing world coordinates to object local coordinates by
        # multiplying with inverted matrix
        init_coord = ob.matrix_world.inverted() @ (p_sys.co_hair(ob, particle_no=pindex, step=0))
        if (
            ob.material_slots[p_sys.settings.material - 1].material
            and ob.active_material is not None
        ):
            pmaterial = ob.material_slots[p_sys.settings.material - 1].material
            for th in pmaterial.pov_texture_slots:
                if th and th.use and th.use_map_color_diffuse:
                    povtex = th.texture  # slot.name
                    tex = bpy.data.textures[povtex]
                    # treat POV textures as bitmaps
                    # NOTE(review): Mesh.uv_textures was renamed uv_layers in
                    # Blender 2.80 — confirm this attribute resolves on target builds.
                    if (
                        tex.type == 'IMAGE'
                        and tex.image
                        and th.texture_coords == 'UV'
                        and ob.data.uv_textures is not None
                    ):
                        # or (
                        # tex.pov.tex_pattern_type != 'emulator'
                        # and th.texture_coords == 'UV'
                        # and ob.data.uv_textures is not None
                        # ):
                        image = tex.image
                        image_width = image.size[0]
                        image_height = image.size[1]
                        image_pixels = image.pixels[:]
                        # BUG FIX: 'mod' was referenced without ever being defined
                        # (NameError). uv_on_emitter() needs the particle system
                        # modifier of the emitter, so look it up from the object.
                        mod = next(
                            (
                                m
                                for m in ob.modifiers
                                if m.type == 'PARTICLE_SYSTEM' and m.particle_system == p_sys
                            ),
                            None,
                        )
                        uv_co = p_sys.uv_on_emitter(mod, p_sys.particles[pindex], pindex, 0)
                        x_co = round(uv_co[0] * (image_width - 1))
                        y_co = round(uv_co[1] * (image_height - 1))
                        pixelnumber = (image_width * y_co) + x_co
                        r = image_pixels[pixelnumber * 4]
                        g = image_pixels[pixelnumber * 4 + 1]
                        b = image_pixels[pixelnumber * 4 + 2]
                        a = image_pixels[pixelnumber * 4 + 3]
                        init_color = (r, g, b, a)
                    else:
                        # only overwrite variable for each competing texture for now
                        init_color = tex.evaluate((init_coord[0], init_coord[1], init_coord[2]))
        for step in range(0, steps):
            coord = ob.matrix_world.inverted() @ (p_sys.co_hair(ob, particle_no=pindex, step=step))
            # for controlPoint in particle.hair_keys:
            if p_sys.settings.clump_factor != 0:
                hair_strand_diameter = p_sys.settings.clump_factor / 200.0 * random.uniform(0.5, 1)
            elif step == 0:
                hair_strand_diameter = strand_start
            else:
                hair_strand_diameter += (strand_end - strand_start) / (
                    p_sys.settings.display_step + 1
                )  # XXX +1 or not? # XXX use strand_shape in formula
            if step == 0 and p_sys.settings.use_hair_bspline:
                # Write three times the first point to compensate pov Bezier handling
                file.write(
                    '<%.6g,%.6g,%.6g>,%.7g,\n'
                    % (coord[0], coord[1], coord[2], abs(hair_strand_diameter))
                )
                file.write(
                    '<%.6g,%.6g,%.6g>,%.7g,\n'
                    % (coord[0], coord[1], coord[2], abs(hair_strand_diameter))
                )
                # Useless because particle location is the tip, not the root:
                # file.write(
                #     '<%.6g,%.6g,%.6g>,%.7g'
                #     % (
                #         particle.location[0],
                #         particle.location[1],
                #         particle.location[2],
                #         abs(hair_strand_diameter)
                #     )
                # )
                # file.write(',\n')
                # controlPointCounter += 1
                # total_number_of_strands += len(p_sys.particles)# len(particle.hair_keys)
            # Each control point is written out, along with the radius of the
            # hair at that point.
            file.write(
                '<%.6g,%.6g,%.6g>,%.7g' % (coord[0], coord[1], coord[2], abs(hair_strand_diameter))
            )
            # All coordinates except the last need a following comma.
            if step != steps - 1:
                file.write(',\n')
            else:
                if textured_hair:
                    # Write pigment and alpha (between Pov and Blender,
                    # alpha 0 and 1 are reversed)
                    file.write(
                        '\npigment{ color srgbf < %.3g, %.3g, %.3g, %.3g> }\n'
                        % (init_color[0], init_color[1], init_color[2], 1.0 - init_color[3])
                    )
        # End the sphere_sweep declaration for this hair
        file.write('}\n')
        # All but the final sphere_sweep (each array element) needs a terminating comma.
        # BUG FIX: the last index is total_number_of_strands - 1, so the former
        # "pindex != total_number_of_strands" test was always true and even the
        # last element got a trailing comma.
        if pindex != total_number_of_strands - 1:
            file.write(',\n')
        else:
            file.write('\n')
    # End the array declaration.
    file.write('}\n')
    file.write('\n')
    if not textured_hair:
        # Pick up the hair material diffuse color and create a default POV-Ray hair texture.
        file.write('#ifndef (HairTexture)\n')
        file.write(' #declare HairTexture = texture {\n')
        file.write(
            ' pigment {srgbt <%s,%s,%s,%s>}\n'
            % (
                pmaterial.diffuse_color[0],
                pmaterial.diffuse_color[1],
                pmaterial.diffuse_color[2],
                (pmaterial.strand.width_fade + 0.05),
            )
        )
        file.write(' }\n')
        file.write('#end\n')
        file.write('\n')
    # Dynamically create a union of the hairstrands (or a subset of them).
    # By default use every hairstrand, commented line is for hand tweaking test renders.
    file.write('//Increasing HairStep divides the amount of hair for test renders.\n')
    file.write('#ifndef(HairStep) #declare HairStep = 1; #end\n')
    file.write('union{\n')
    file.write(' #local I = 0;\n')
    file.write(' #while (I < %i)\n' % total_number_of_strands)
    file.write(' object {HairArray[I]')
    if not textured_hair:
        file.write(' texture{HairTexture}\n')
    else:
        file.write('\n')
    # Translucency of the hair:
    file.write(' hollow\n')
    file.write(' double_illuminate\n')
    file.write(' interior {\n')
    file.write(' ior 1.45\n')
    file.write(' media {\n')
    file.write(' scattering { 1, 10*<0.73, 0.35, 0.15> /*extinction 0*/ }\n')
    file.write(' absorption 10/<0.83, 0.75, 0.15>\n')
    file.write(' samples 1\n')
    file.write(' method 2\n')
    file.write(' density {cylindrical\n')
    file.write(' color_map {\n')
    file.write(' [0.0 rgb <0.83, 0.45, 0.35>]\n')
    file.write(' [0.5 rgb <0.8, 0.8, 0.4>]\n')
    file.write(' [1.0 rgb <1,1,1>]\n')
    file.write(' }\n')
    file.write(' }\n')
    file.write(' }\n')
    file.write(' }\n')
    file.write(' }\n')
    file.write(' #local I = I + HairStep;\n')
    file.write(' #end\n')
    write_matrix(global_matrix @ ob.matrix_world)
    file.write('}')
    print('Totals hairstrands written: %i' % total_number_of_strands)
    print('Number of tufts (particle systems)', len(ob.particle_systems))
    # Set back the displayed number of particles to preview count
    # p_sys.set_resolution(scene, ob, 'PREVIEW') #DEPRECATED
    # When you render, the entire dependency graph will be
    # evaluated at render resolution, including the particles.
    # In the viewport it will be at viewport resolution.
    # So there is no need for render engines to use this function anymore,
    # it's automatic now.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment