Commit 8a1a2e24 authored by Vilém Duha

BlenderKit: fixes for resolutions and unpacking

32-bit images were not handled correctly and could produce wrong unpacked files and resolution files.

Also moves image utils and resolutions code into the files where they fit.
Also adds a function check_nmap_ogl_vs_dx to image utils that can recognize whether a normal map is DirectX or OpenGL. This will enable more pre-validation checks for users.
parent a1fbcb61
import bpy
import os
import time
def get_orig_render_settings():
rs = bpy.context.scene.render
@@ -98,3 +99,391 @@ def generate_hdr_thumbnail():
inew.scale(thumbnailWidth, thumbnailHeight)
img_save_as(inew, filepath=inew.filepath)
def find_color_mode(image):
if not isinstance(image, bpy.types.Image):
raise TypeError('expected a bpy.types.Image')
else:
depth_mapping = {
8: 'BW',
24: 'RGB',
32: 'RGBA',#can also be bw.. but image.channels doesn't work.
96: 'RGB',
128: 'RGBA',
}
return depth_mapping.get(image.depth,'RGB')
def find_image_depth(image):
if not isinstance(image, bpy.types.Image):
raise TypeError('expected a bpy.types.Image')
else:
depth_mapping = {
8: '8',
24: '8',
32: '8',#can also be bw.. but image.channels doesn't work.
96: '16',
128: '16',
}
return depth_mapping.get(image.depth,'8')
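# A minimal usage sketch for the two helpers above: derive the save settings for an
# already loaded image. The image name 'my_texture' is a placeholder assumption;
# any loaded bpy.types.Image works.
def _example_configure_save_settings():
    img = bpy.data.images.get('my_texture')  # placeholder name
    if img is None:
        return
    ims = bpy.context.scene.render.image_settings
    ims.color_mode = find_color_mode(img)    # 'BW', 'RGB' or 'RGBA', derived from img.depth
    ims.color_depth = find_image_depth(img)  # '8' or '16', derived from img.depth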
def can_erase_alpha(na):
alpha = na[3::4]
alpha_sum = alpha.sum()
if alpha_sum == alpha.size:
print('image can have alpha erased')
# print(alpha_sum, alpha.size)
return alpha_sum == alpha.size
def is_image_black(na):
r = na[::4]
g = na[1::4]
b = na[2::4]
rgbsum = r.sum() + g.sum() + b.sum()
# print('rgb sum', rgbsum, r.sum(), g.sum(), b.sum())
if rgbsum == 0:
print('image is pure black')
return rgbsum == 0
def is_image_bw(na):
r = na[::4]
g = na[1::4]
b = na[2::4]
rg_equal = r == g
gb_equal = g == b
rgbequal = rg_equal.all() and gb_equal.all()
if rgbequal:
print('image is black and white, can have channels reduced')
return rgbequal
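# A minimal usage sketch for the three per-pixel checks above. They expect the flat
# RGBA float array produced by imagetonumpy_flat() further below; 'my_texture' is a
# placeholder image name.
def _example_inspect_pixels():
    img = bpy.data.images.get('my_texture')  # placeholder name
    if img is None:
        return
    na = imagetonumpy_flat(img)
    if can_erase_alpha(na):   # every alpha value is 1.0
        print('alpha channel carries no information')
    if is_image_black(na):    # every RGB value is 0.0
        print('image is pure black')
    if is_image_bw(na):       # R == G == B for every pixel
        print('image is greyscale')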
def numpytoimage(a, iname, width=0, height=0, channels=3):
t = time.time()
foundimage = False
for image in bpy.data.images:
if image.name[:len(iname)] == iname and image.size[0] == a.shape[0] and image.size[1] == a.shape[1]:
i = image
foundimage = True
if not foundimage:
if channels == 4:
bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0, 1), alpha=True,
generated_type='BLANK', float=True)
if channels == 3:
bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0), alpha=False,
generated_type='BLANK', float=True)
i = None
for image in bpy.data.images:
# print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
i = image
if i is None:
i = bpy.data.images.new(iname, width, height, alpha=False, float_buffer=False, stereo3d=False, is_data=False, tiled=False)
# dropping this re-shaping code - just doing flat array for speed and simplicity
# d = a.shape[0] * a.shape[1]
# a = a.swapaxes(0, 1)
# a = a.reshape(d)
# a = a.repeat(channels)
# a[3::4] = 1
i.pixels.foreach_set(a) # this gives big speedup!
print('\ntime ' + str(time.time() - t))
return i
def imagetonumpy_flat(i):
t = time.time()
import numpy
width = i.size[0]
height = i.size[1]
# print(i.channels)
size = width * height * i.channels
na = numpy.empty(size, numpy.float32)
i.pixels.foreach_get(na)
# dropping this re-shaping code - just doing flat array for speed and simplicity
# na = na[::4]
# na = na.reshape(height, width, i.channels)
# na = na.swapaxnes(0, 1)
# print('\ntime of image to numpy ' + str(time.time() - t))
return na
def imagetonumpy(i):
t = time.time()
import numpy as np
width = i.size[0]
height = i.size[1]
# print(i.channels)
size = width * height * i.channels
na = np.empty(size, np.float32)
i.pixels.foreach_get(na)
# dropping this re-shaping code - just doing flat array for speed and simplicity
# na = na[::4]
na = na.reshape(height, width, i.channels)
na = na.swapaxes(0, 1)
# print('\ntime of image to numpy ' + str(time.time() - t))
return na
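# A minimal round-trip sketch for the two converters above: read an image into a
# (width, height, channels) array, modify it, and write it back out as a new datablock.
# 'my_texture' and 'my_texture_inverted' are placeholder names.
def _example_invert_copy():
    img = bpy.data.images.get('my_texture')  # placeholder name
    if img is None:
        return
    na = imagetonumpy(img)                   # shape (width, height, channels)
    na[:, :, :3] = 1.0 - na[:, :, :3]        # invert RGB, keep alpha untouched
    flat = na.swapaxes(0, 1).flatten()       # back to the flat layout foreach_set() expects
    numpytoimage(flat, 'my_texture_inverted',
                 width=img.size[0], height=img.size[1], channels=img.channels)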
def downscale(i):
minsize = 128
sx, sy = i.size[:]
sx = round(sx / 2)
sy = round(sy / 2)
if sx > minsize and sy > minsize:
i.scale(sx, sy)
def get_rgb_mean(i):
'''Returns the mean of the R, G and B channels of an image.'''
import numpy
na = imagetonumpy_flat(i)
r = na[::4]
g = na[1::4]
b = na[2::4]
rmean = r.mean()
gmean = g.mean()
bmean = b.mean()
rmedian = numpy.median(r)
gmedian = numpy.median(g)
bmedian = numpy.median(b)
# return(rmedian,gmedian, bmedian)
return (rmean, gmean, bmean)
def check_nmap_mean_ok(i):
'''checks if normal map values are in standard range.'''
rmean,gmean,bmean = get_rgb_mean(i)
# we could/should also check blue, but some OpenGL Substance exports use the 0-1 range,
# while ~90% of normal maps use 0.5-1.
nmap_ok = 0.45 < rmean < 0.55 and 0.45 < gmean < 0.55
return nmap_ok
def check_nmap_ogl_vs_dx(i, mask=None, generated_test_images=False):
'''
Checks whether a normal map is DirectX or OpenGL.
Returns the string 'DirectX' or 'OpenGL'.
'''
import numpy
width = i.size[0]
height = i.size[1]
rmean, gmean, bmean = get_rgb_mean(i)
na = imagetonumpy(i)
if mask:
mask = imagetonumpy(mask)
red_x_comparison = numpy.zeros((width, height), numpy.float32)
green_y_comparison = numpy.zeros((width, height), numpy.float32)
if generated_test_images:
red_x_comparison_img = numpy.empty((width, height, 4), numpy.float32) #images for debugging purposes
green_y_comparison_img = numpy.empty((width, height, 4), numpy.float32)#images for debugging purposes
ogl = numpy.zeros((width, height), numpy.float32)
dx = numpy.zeros((width, height), numpy.float32)
if generated_test_images:
ogl_img = numpy.empty((width, height, 4), numpy.float32) # images for debugging purposes
dx_img = numpy.empty((width, height, 4), numpy.float32) # images for debugging purposes
for y in range(0, height):
for x in range(0, width):
#try to mask with UV mask image
if mask is None or mask[x,y,3]>0:
last_height_x = ogl[max(x - 1, 0), min(y, height - 1)]
last_height_y = ogl[max(x,0), min(y - 1,height-1)]
diff_x = ((na[x, y, 0] - rmean) / ((na[x, y, 2] - 0.5)))
diff_y = ((na[x, y, 1] - gmean) / ((na[x, y, 2] - 0.5)))
calc_height = (last_height_x + last_height_y) \
- diff_x - diff_y
calc_height = calc_height /2
ogl[x, y] = calc_height
if generated_test_images:
rgb = calc_height *.1 +.5
ogl_img[x,y] = [rgb,rgb,rgb,1]
# green channel
last_height_x = dx[max(x - 1, 0), min(y, height - 1)]
last_height_y = dx[max(x, 0), min(y - 1, height - 1)]
diff_x = ((na[x, y, 0] - rmean) / ((na[x, y, 2] - 0.5)))
diff_y = ((na[x, y, 1] - gmean) / ((na[x, y, 2] - 0.5)))
calc_height = (last_height_x + last_height_y) \
- diff_x + diff_y
calc_height = calc_height / 2
dx[x, y] = calc_height
if generated_test_images:
rgb = calc_height * .1 + .5
dx_img[x, y] = [rgb, rgb, rgb, 1]
ogl_std = ogl.std()
dx_std = dx.std()
# print(mean_ogl, mean_dx)
# print(max_ogl, max_dx)
print(ogl_std, dx_std)
print(i.name)
# if abs(mean_ogl) > abs(mean_dx):
if abs(ogl_std) > abs(dx_std):
print('this is probably a DirectX texture')
else:
print('this is probably an OpenGL texture')
if generated_test_images:
# red_x_comparison_img = red_x_comparison_img.swapaxes(0,1)
# red_x_comparison_img = red_x_comparison_img.flatten()
#
# green_y_comparison_img = green_y_comparison_img.swapaxes(0,1)
# green_y_comparison_img = green_y_comparison_img.flatten()
#
# numpytoimage(red_x_comparison_img, 'red_' + i.name, width=width, height=height, channels=1)
# numpytoimage(green_y_comparison_img, 'green_' + i.name, width=width, height=height, channels=1)
ogl_img = ogl_img.swapaxes(0, 1)
ogl_img = ogl_img.flatten()
dx_img = dx_img.swapaxes(0, 1)
dx_img = dx_img.flatten()
numpytoimage(ogl_img, 'OpenGL', width=width, height=height, channels=1)
numpytoimage(dx_img, 'DirectX', width=width, height=height, channels=1)
if abs(ogl_std) > abs(dx_std):
return 'DirectX'
return 'OpenGL'
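# A minimal validation sketch combining the two normal-map checks above: first confirm
# the red/green means sit in the expected neutral range, then classify the map.
# 'nmap' is a placeholder image name; the UV mask argument is optional and can stay None.
def _example_validate_normal_map():
    img = bpy.data.images.get('nmap')  # placeholder name
    if img is None:
        return None
    if not check_nmap_mean_ok(img):
        print('normal map means are outside the usual 0.45-0.55 range')
    style = check_nmap_ogl_vs_dx(img)  # 'DirectX' or 'OpenGL'
    print(img.name, 'looks like a', style, 'normal map')
    return style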
def make_possible_reductions_on_image(teximage, input_filepath, do_reductions=False, do_downscale=False):
'''Checks the image and saves it to disk with possibly reduced channels.
Can also remove the image from the asset if the image is pure black
- it finds its usages and replaces the inputs where the image is used
with a zero/black color.
currently implemented file type conversions:
PNG->JPG
'''
colorspace = teximage.colorspace_settings.name
teximage.colorspace_settings.name = 'Non-Color'
#teximage.colorspace_settings.name = 'sRGB' color correction mambo jambo.
JPEG_QUALITY = 90
# is_image_black(na)
# is_image_bw(na)
rs = bpy.context.scene.render
ims = rs.image_settings
orig_file_format = ims.file_format
orig_quality = ims.quality
orig_color_mode = ims.color_mode
orig_compression = ims.compression
orig_depth = ims.color_depth
# if is_image_black(na):
# # just erase the image from the asset here, no need to store black images.
# pass;
# fp = teximage.filepath
# setup image depth, 8 or 16 bit.
# this would normally divide depth by the number of channels, but Blender always reports 4 channels, even if there are only 3
print(teximage.name)
print(teximage.depth)
print(teximage.channels)
bpy.context.scene.display_settings.display_device = 'None'
image_depth = find_image_depth(teximage)
ims.color_mode = find_color_mode(teximage)
#image_depth = str(max(min(int(teximage.depth / 3), 16), 8))
print('resulting depth set to:', image_depth)
fp = input_filepath
if do_reductions:
na = imagetonumpy_flat(teximage)
if can_erase_alpha(na):
print(teximage.file_format)
if teximage.file_format == 'PNG':
print('changing type of image to JPG')
base, ext = os.path.splitext(fp)
teximage['original_extension'] = ext
fp = fp.replace('.png', '.jpg')
fp = fp.replace('.PNG', '.jpg')
teximage.name = teximage.name.replace('.png', '.jpg')
teximage.name = teximage.name.replace('.PNG', '.jpg')
teximage.file_format = 'JPEG'
ims.quality = JPEG_QUALITY
ims.color_mode = 'RGB'
if is_image_bw(na):
ims.color_mode = 'BW'
ims.file_format = teximage.file_format
ims.color_depth = image_depth
# all pngs with max compression
if ims.file_format == 'PNG':
ims.compression = 100
# all jpgs brought to reasonable quality
if ims.file_format == 'JPEG':
ims.quality = JPEG_QUALITY
if do_downscale:
downscale(teximage)
# it's actually very important not to try to change the image filepath and packed file filepath before saving,
# blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
teximage.save_render(filepath=bpy.path.abspath(fp), scene=bpy.context.scene)
if len(teximage.packed_files) > 0:
teximage.unpack(method='REMOVE')
teximage.filepath = fp
teximage.filepath_raw = fp
teximage.reload()
teximage.colorspace_settings.name = colorspace
ims.file_format = orig_file_format
ims.quality = orig_quality
ims.color_mode = orig_color_mode
ims.compression = orig_compression
ims.color_depth = orig_depth
\ No newline at end of file
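# A minimal usage sketch for make_possible_reductions_on_image() above, mirroring how the
# resolution-generation code drives it: a first pass reduces channels/format at full size,
# later passes only downscale. 'my_texture' is a placeholder name; the path comes from the image.
def _example_reduce_and_downscale():
    img = bpy.data.images.get('my_texture')  # placeholder name
    if img is None:
        return
    fp = bpy.path.abspath(img.filepath)      # write next to the existing file
    # full-resolution pass: drop an unused alpha channel, convert PNG -> JPG where possible
    make_possible_reductions_on_image(img, fp, do_reductions=True, do_downscale=False)
    # later passes: halve the resolution (down to the 128 px floor in downscale())
    make_possible_reductions_on_image(img, fp, do_reductions=False, do_downscale=True)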
@@ -55,100 +55,9 @@ def get_current_resolution():
return actres
def can_erase_alpha(na):
alpha = na[3::4]
alpha_sum = alpha.sum()
if alpha_sum == alpha.size:
print('image can have alpha erased')
# print(alpha_sum, alpha.size)
return alpha_sum == alpha.size
def is_image_black(na):
r = na[::4]
g = na[1::4]
b = na[2::4]
rgbsum = r.sum() + g.sum() + b.sum()
# print('rgb sum', rgbsum, r.sum(), g.sum(), b.sum())
if rgbsum == 0:
print('image can have alpha channel dropped')
return rgbsum == 0
def is_image_bw(na):
r = na[::4]
g = na[1::4]
b = na[2::4]
rg_equal = r == g
gb_equal = g == b
rgbequal = rg_equal.all() and gb_equal.all()
if rgbequal:
print('image is black and white, can have channels reduced')
return rgbequal
def numpytoimage(a, iname, width=0, height=0, channels=3):
t = time.time()
foundimage = False
for image in bpy.data.images:
if image.name[:len(iname)] == iname and image.size[0] == a.shape[0] and image.size[1] == a.shape[1]:
i = image
foundimage = True
if not foundimage:
if channels == 4:
bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0, 1), alpha=True,
generated_type='BLANK', float=True)
if channels == 3:
bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0), alpha=False,
generated_type='BLANK', float=True)
for image in bpy.data.images:
# print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
i = image
# dropping this re-shaping code - just doing flat array for speed and simplicity
# d = a.shape[0] * a.shape[1]
# a = a.swapaxes(0, 1)
# a = a.reshape(d)
# a = a.repeat(channels)
# a[3::4] = 1
i.pixels.foreach_set(a) # this gives big speedup!
print('\ntime ' + str(time.time() - t))
return i
def imagetonumpy(i):
t = time.time()
import numpy as np
width = i.size[0]
height = i.size[1]
# print(i.channels)
size = width * height * i.channels
na = np.empty(size, np.float32)
i.pixels.foreach_get(na)
# dropping this re-shaping code - just doing flat array for speed and simplicity
# na = na[::4]
# na = na.reshape(height, width, i.channels)
# na = na.swapaxnes(0, 1)
# print('\ntime of image to numpy ' + str(time.time() - t))
return na
def save_image_safely(teximage, filepath):
'''
Blender makes it really hard to save images... this is to fix its crazy bad image saving.
Would be worth investigating PIL or similar instead
Parameters
----------
@@ -204,95 +113,8 @@ def extxchange_to_resolution(filepath):
ext = 'jpg'
def make_possible_reductions_on_image(teximage, input_filepath, do_reductions=False, do_downscale=False):
'''checks the image and saves it to drive with possibly reduced channels.
Also can remove the image from the asset if the image is pure black
- it finds it's usages and replaces the inputs where the image is used
with zero/black color.
currently implemented file type conversions:
PNG->JPG
'''
colorspace = teximage.colorspace_settings.name
teximage.colorspace_settings.name = 'Non-Color'
JPEG_QUALITY = 90
# is_image_black(na)
# is_image_bw(na)
rs = bpy.context.scene.render
ims = rs.image_settings
orig_file_format = ims.file_format
orig_quality = ims.quality
orig_color_mode = ims.color_mode
orig_compression = ims.compression
# if is_image_black(na):
# # just erase the image from the asset here, no need to store black images.
# pass;
# fp = teximage.filepath
fp = input_filepath
if do_reductions:
na = imagetonumpy(teximage)
if can_erase_alpha(na):
print(teximage.file_format)
if teximage.file_format == 'PNG':
print('changing type of image to JPG')
base, ext = os.path.splitext(fp)
teximage['original_extension'] = ext
fp = fp.replace('.png', '.jpg')
fp = fp.replace('.PNG', '.jpg')
teximage.name = teximage.name.replace('.png', '.jpg')
teximage.name = teximage.name.replace('.PNG', '.jpg')
teximage.file_format = 'JPEG'
ims.quality = JPEG_QUALITY
ims.color_mode = 'RGB'
if is_image_bw(na):
ims.color_mode = 'BW'
ims.file_format = teximage.file_format
# all pngs with max compression
if ims.file_format == 'PNG':
ims.compression = 100
# all jpgs brought to reasonable quality
if ims.file_format == 'JPG':
ims.quality = JPEG_QUALITY
if do_downscale:
downscale(teximage)
# it's actually very important not to try to change the image filepath and packed file filepath before saving,
# blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
teximage.save_render(filepath=bpy.path.abspath(fp), scene=bpy.context.scene)
if len(teximage.packed_files) > 0:
teximage.unpack(method='REMOVE')
teximage.filepath = fp
teximage.filepath_raw = fp
teximage.reload()
teximage.colorspace_settings.name = colorspace
ims.file_format = orig_file_format
ims.quality = orig_quality
ims.color_mode = orig_color_mode
ims.compression = orig_compression
def downscale(i):
minsize = 128
sx, sy = i.size[:]
sx = round(sx / 2)
sy = round(sy / 2)
if sx > minsize and sy > minsize:
i.scale(sx, sy)
def upload_resolutions(files, asset_data):
@@ -341,9 +163,10 @@ def unpack_asset(data):
pf.filepath = fp # bpy.path.abspath(fp)
image.filepath = fp # bpy.path.abspath(fp)
image.filepath_raw = fp # bpy.path.abspath(fp)
image.save()
# image.save()
if len(image.packed_files) > 0:
image.unpack(method='REMOVE')
# image.unpack(method='REMOVE')
image.unpack(method='WRITE_ORIGINAL')
bpy.ops.wm.save_mainfile(compress=False)
# now try to delete the .blend1 file
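# A minimal sketch of the unpacking flow this hunk switches to: unpack(method='WRITE_ORIGINAL')
# should write the packed data to the image's current filepath and unpack it in one step,
# so the explicit image.save() / unpack(method='REMOVE') pair above is no longer needed.
# 'my_texture' and the target path are placeholders.
def _example_unpack_image(fp='//textures/my_texture.png'):
    import bpy  # local import, matching the style used elsewhere in this file
    image = bpy.data.images.get('my_texture')  # placeholder name
    if image is None:
        return
    image.filepath = fp
    image.filepath_raw = fp
    if len(image.packed_files) > 0:
        image.unpack(method='WRITE_ORIGINAL')  # writes to filepath, then drops the pack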
@@ -524,11 +347,11 @@ def generate_lower_resolutions(data):
# first, let's link the image back to the original one.
i['blenderkit_original_path'] = i.filepath
# first round also makes reductions on the image, while keeping resolution
make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)
image_utils.make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)
else:
# lower resolutions only downscale
make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)
image_utils.make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)
abspath = bpy.path.abspath(i.filepath)
if os.path.exists(abspath):
@@ -556,7 +379,7 @@ def generate_lower_resolutions(data):
else:
p2res = rkeys[rkeys.index(p2res) - 1]
print('uploading resolution files')
upload_resolutions(files, data['asset_data'])
#upload_resolutions(files, data['asset_data'])
preferences = bpy.context.preferences.addons['blenderkit'].preferences
patch_asset_empty(data['asset_data']['id'], preferences.api_key)
return
@@ -666,41 +489,6 @@ def get_materials_for_validation(page_size=100, max_results=100000000):
return filepath
# This gets all assets in the database through the/assets endpoint. Currently not used, since we use elastic for everything.
# def get_assets_list():
# bpy.app.debug_value = 2
#
# results = []
# preferences = bpy.context.preferences.addons['blenderkit'].preferences
# url = paths.get_api_url() + 'assets/all'
# i = 0
# while url is not None:
# headers = utils.get_headers(preferences.api_key)
# print('fetching assets from assets endpoint')
# print(url)
# retries = 0
# while retries < 3:
# r = rerequests.get(url, headers=headers)
#
# try:
# adata = r.json()
# url = adata.get('next')
# print(i)
# i += 1
# except Exception as e:
# print(e)
# print('failed to get next')
# if retries == 2:
# url = None
# if adata.get('results') != None:
# results.extend(adata['results'])
# retries = 3
# print(f'fetched page {i}')
# retries += 1
#
# fpath = assets_db_path()
# with open(fpath, 'w', encoding = 'utf-8') as s:
# json.dump(results, s, ensure_ascii=False, indent=4)
def load_assets_list(filepath):
@@ -758,6 +546,7 @@ def generate_resolution_thread(asset_data, api_key):
'''
fpath = download_asset(asset_data, unpack=True, api_key=api_key)
if fpath:
if asset_data['assetType'] != 'hdr':
print('send to bg ', fpath)
......