# NOTE(review): stray 'Newer'/'Older' web-diff artifacts removed from this excerpt.
def request_profile(api_key):
    '''Fetch the current user's profile from the server's 'me/' endpoint.

    Returns the parsed json dict on success, or None when the response has no
    'user' entry (e.g. an invalid api key), logging the failure via utils.p.
    '''
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    # the GET itself was missing - without it 'r' below is undefined
    r = rerequests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
def fetch_profile(api_key):
    '''Background-thread entry point: request the profile and queue it for write.

    The write happens through tasks_queue because Blender data may only be
    touched from the main thread.
    '''
    utils.p('fetch profile')
    try:
        adata = request_profile(api_key)
        if adata is not None:
            tasks_queue.add_task((write_profile, (adata,)))
    except Exception as e:
        # best effort - a failed profile fetch must not crash the thread
        utils.p(e)
def get_profile():
    '''Return the cached profile dict (or None) and refresh it asynchronously.

    A daemon thread re-fetches the profile in the background; the currently
    cached value from the window manager is returned immediately.
    '''
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    a = bpy.context.window_manager.get('bkit profile')
    thread = threading.Thread(target=fetch_profile, args=(preferences.api_key,), daemon=True)
    thread.start()
    # the cached value was computed but never returned
    return a
# ... (code elided from this excerpt; bare line-number residue removed) ...
def query_to_url(query=None, params=None):
    '''Build the search API url string from a query dict and search params.

    query  - elastic query parameters ('query' keywords plus filter fields)
    params - addon-level parameters: 'free_first', 'addon_version',
             optionally 'scene_uuid'
    '''
    # avoid mutable default arguments (shared dict across calls)
    if query is None:
        query = {}
    if params is None:
        params = {}
    url = paths.get_api_url() + 'search/'
    # build request manually
    # TODO use real queries
    requeststring = '?query='
    if query.get('query') not in ('', None):
        requeststring += query['query'].lower()
    for q in query:
        if q != 'query':
            requeststring += '+' + q + ':' + str(query[q]).lower()

    # result ordering: _score - relevance, score - BlenderKit score
    order = []
    if params['free_first']:
        order = ['-is_free', ]
    if query.get('query') is None and query.get('category_subtree') is None:
        # assumes no keywords and no category, thus an empty search that is triggered on start.
        # orders by last core file upload
        if query.get('verification_status') == 'uploaded':
            # for validators, sort uploaded from oldest
            order.append('created')
        else:
            order.append('-last_upload')
    elif query.get('author_id') is not None and utils.profile_is_validator():
        order.append('-created')
    else:
        if query.get('category_subtree') is not None:
            order.append('-score,_score')
        else:
            order.append('_score')
    requeststring += '+order:' + ','.join(order)

    requeststring += '&addon_version=%s' % params['addon_version']
    if params.get('scene_uuid') is not None:
        requeststring += '&scene_uuid=%s' % params['scene_uuid']
    return url + requeststring
def parse_html_formated_error(text):
    '''Extract the contents of the <title> tag from an html error page.'''
    start = text.find('<title>') + len('<title>')
    end = text.find('</title>')
    return text[start:end]
class Searcher(threading.Thread):
    '''Background thread that runs one search request and downloads thumbnails.

    The finished (merged) result is left in self.result for the main-thread
    timer to pick up.
    '''
    query = None

    def __init__(self, query, params, orig_result, tempdir='', headers=None, urlquery=''):
        super(Searcher, self).__init__()
        self.query = query
        self.params = params
        # run() prepends previous-page results, so orig_result must be kept
        # (it was accepted but never stored before)
        self.result = orig_result
        self._stop_event = threading.Event()
        self.tempdir = tempdir
        self.headers = headers
        self.urlquery = urlquery

    def stop(self):
        '''Request the thread to finish early; checked between long steps.'''
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        t = time.time()
        mt('search thread started')
        rdata = {}
        rdata['results'] = []
        try:
            utils.p(self.urlquery)
            r = rerequests.get(self.urlquery, headers=self.headers)  # , params = rparameters)
        except requests.exceptions.RequestException as e:
            # the request failed - there is no response object here, so report
            # the exception itself (r.text would be unbound)
            error_description = str(e)
            reports_queue.put(error_description)
            tasks_queue.add_task((ui.add_report, (error_description, 10, colors.RED)))
            self.result = rdata
            return

        try:
            rdata = r.json()
        except Exception:
            # server answered with a non-json (html) error page
            error_description = parse_html_formated_error(r.text)
            reports_queue.put(error_description)
            tasks_queue.add_task((ui.add_report, (error_description, 10, colors.RED)))
            self.result = rdata
            return

        if not rdata.get('results'):
            utils.pprint(rdata)
            # if the result was converted to json and didn't return results,
            # it means it's a server error that has a clear message.
            # That's why it gets processed in the update timer, where it can be passed in messages to user.
            self.result = rdata
            return

        mt('search finished')
        i = 0

        thumb_small_urls = []
        thumb_small_filepaths = []
        thumb_full_urls = []
        thumb_full_filepaths = []
        # collect thumbnail urls and their local target paths
        for d in rdata.get('results', []):
            thumb_small_urls.append(d["thumbnailSmallUrl"])
            imgname = paths.extract_filename_from_url(d['thumbnailSmallUrl'])
            imgpath = os.path.join(self.tempdir, imgname)
            thumb_small_filepaths.append(imgpath)

            if d["assetType"] == 'hdr':
                larege_thumb_url = d['thumbnailMiddleUrlNonsquared']
            else:
                larege_thumb_url = d['thumbnailMiddleUrl']

            thumb_full_urls.append(larege_thumb_url)
            imgname = paths.extract_filename_from_url(larege_thumb_url)
            imgpath = os.path.join(self.tempdir, imgname)
            thumb_full_filepaths.append(imgpath)

        sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
        full_thbs = zip(thumb_full_filepaths, thumb_full_urls)

        # we can also prepend previous results. These have downloaded thumbnails already...
        rdata['results'][0:0] = self.result['results']
        self.result = rdata

        killthreads_sml = []
        for k in thumb_sml_download_threads.keys():
            if k not in thumb_small_filepaths:
                killthreads_sml.append(k)  # do actual killing here?

        killthreads_full = []
        for k in thumb_full_download_threads.keys():
            if k not in thumb_full_filepaths:
                killthreads_full.append(k)  # do actual killing here?
        # TODO do the killing/ stopping here! remember threads might have finished inbetween!

        if self.stopped():
            return

        # this loop handles downloading of small thumbnails
        for imgpath, url in sml_thbs:
            if imgpath not in thumb_sml_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                thread.start()
                thumb_sml_download_threads[imgpath] = thread
                # throttle: drain finished downloads until we are under maxthreads
                # (a stray 'return' here previously aborted the whole search)
                if len(thumb_sml_download_threads) > maxthreads:
                    while len(thumb_sml_download_threads) > maxthreads:
                        threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
                        for tk, thread in threads_copy.items():
                            if not thread.is_alive():
                                thread.join()
                                del (thumb_sml_download_threads[tk])
                                i += 1

        # wait for the remaining small-thumbnail downloads to finish
        while len(thumb_sml_download_threads) > 0:
            threads_copy = thumb_sml_download_threads.copy()  # because for loop can erase some of the items.
            for tk, thread in threads_copy.items():
                if not thread.is_alive():
                    thread.join()
                    del (thumb_sml_download_threads[tk])
                    i += 1

        if self.stopped():
            return

        # start downloading full thumbs in the end
        for imgpath, url in full_thbs:
            if imgpath not in thumb_full_download_threads and not os.path.exists(imgpath):
                thread = ThumbDownloader(url, imgpath)
                thread.start()
                thumb_full_download_threads[imgpath] = thread
        mt('thumbnails finished')
def build_query_common(query, props):
    '''Add search parameters shared by all asset types into *query* (in place).

    query - the asset-type specific query dict, updated in place
    props - the add-on's search property group for the active asset type
    '''
    query_common = {}
    query_common["query"] = props.search_keywords
    if props.search_verification_status != 'ALL':
        query_common['verification_status'] = props.search_verification_status.lower()
    if props.unrated_only:
        query_common["quality_count"] = 0
    query_common["files_size_gte"] = props.search_file_size_min * 1024 * 1024
    query_common["files_size_lte"] = props.search_file_size_max * 1024 * 1024
    # the collected values were previously discarded - merge them into query
    query.update(query_common)
def build_query_model():
    '''use all search input to request results from server'''
    props = bpy.context.scene.blenderkit_models
    query = {
        "asset_type": 'model',
        # "engine": props.search_engine,
        # "adult": props.search_adult,
    }
    if props.search_style != 'ANY':
        if props.search_style != 'OTHER':
            query["model_style"] = props.search_style
        else:
            # a missing 'else' previously overwrote the chosen style unconditionally
            query["model_style"] = props.search_style_other

    # the 'free_only' parametr gets moved to the search command and is used for ordering the assets as free first
    # if props.free_only:
    #     query["is_free"] = True

    # if props.search_advanced:
    if props.search_condition != 'UNSPECIFIED':
        query["condition"] = props.search_condition
    if props.search_design_year:
        query["designYear_gte"] = props.search_design_year_min
        query["designYear_lte"] = props.search_design_year_max
    if props.search_polycount:
        query["faceCount_gte"] = props.search_polycount_min
        query["faceCount_lte"] = props.search_polycount_max
    if props.search_texture_resolution:
        query["textureResolutionMax_gte"] = props.search_texture_resolution_min
        query["textureResolutionMax_lte"] = props.search_texture_resolution_max

    build_query_common(query, props)
    return query
def build_query_scene():
    '''Assemble the server search query for scene assets.'''
    scene_props = bpy.context.scene.blenderkit_scene
    query = {"asset_type": 'scene'}
    build_query_common(query, scene_props)
    return query
def build_query_HDR():
    '''Assemble the server search query for HDR assets.'''
    hdr_props = bpy.context.scene.blenderkit_HDR
    query = {"asset_type": 'hdr'}
    build_query_common(query, hdr_props)
    return query
def build_query_material():
    '''Assemble the server search query for material assets.'''
    props = bpy.context.scene.blenderkit_mat
    query = {
        "asset_type": 'material',
    }
    # if props.search_engine == 'NONE':
    #     query["engine"] = ''
    # if props.search_engine != 'OTHER':
    #     query["engine"] = props.search_engine
    # else:
    #     query["engine"] = props.search_engine_other
    if props.search_style != 'ANY':
        if props.search_style != 'OTHER':
            query["style"] = props.search_style
        else:
            query["style"] = props.search_style_other

    if props.search_procedural == 'TEXTURE_BASED':
        # todo this procedural hack should be replaced with the parameter
        query["textureResolutionMax_gte"] = 0
        # query["procedural"] = False
        if props.search_texture_resolution:
            query["textureResolutionMax_gte"] = props.search_texture_resolution_min
            query["textureResolutionMax_lte"] = props.search_texture_resolution_max
    elif props.search_procedural == "PROCEDURAL":
        # todo this procedural hack should be replaced with the parameter
        # procedural materials are small: limit by file size instead of resolution
        query["files_size_lte"] = 1024 * 1024
        # query["procedural"] = True

    build_query_common(query, props)
    return query
def build_query_texture():
    '''Assemble the server search query for texture assets.'''
    tex_props = bpy.context.scene.blenderkit_tex
    query = {"asset_type": 'texture'}
    if tex_props.search_style != 'ANY':
        if tex_props.search_style == 'OTHER':
            query["search_style"] = tex_props.search_style_other
        else:
            query["search_style"] = tex_props.search_style
    build_query_common(query, tex_props)
    return query
def build_query_brush():
    '''Assemble the server search query for brushes, based on the paint mode.'''
    brush_props = bpy.context.scene.blenderkit_brush
    brush_type = ''
    if bpy.context.sculpt_object is not None:
        brush_type = 'sculpt'
    elif bpy.context.image_paint_object:  # could be just else, but for future p
        brush_type = 'texture_paint'
    # NOTE(review): brush_type is computed but unused here - it looks like it
    # was meant to be part of the query; confirm against the full file.
    query = {"asset_type": 'brush'}
    build_query_common(query, brush_props)
    return query
def mt(text):
    '''Measure-time debug helper: log *text* with elapsed and delta timings.

    Tracks total time since the search started and time since the last call.
    '''
    global search_start_time, prev_time
    alltime = time.time() - search_start_time
    since_last = time.time() - prev_time
    prev_time = time.time()
    # the computed timings were previously discarded; emit them
    utils.p(text, alltime, since_last)
def add_search_process(query, params, orig_result):
    '''Stop any running search threads and launch a new Searcher for *query*.'''
    global search_threads

    while len(search_threads) > 0:
        old_thread = search_threads.pop(0)
        old_thread[0].stop()
        # TODO CARE HERE FOR ALSO KILLING THE Thumbnail THREADS.?
        # AT LEAST NOW SEARCH DONE FIRST WON'T REWRITE AN NEWER ONE

    tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
    headers = utils.get_headers(params['api_key'])
    if params['get_next']:
        # paging: reuse the 'next' url the server returned
        urlquery = orig_result['next']
    else:
        urlquery = query_to_url(query, params)

    thread = Searcher(query, params, orig_result, tempdir=tempdir, headers=headers, urlquery=urlquery)
    # the thread was created but never started
    thread.start()
    search_threads.append([thread, tempdir, query['asset_type'], {}])  # 4th field is for results
# ... (code elided from this excerpt; bare line-number residue removed) ...
# mt('search thread started')  # stray duplicated fragment from Searcher.run - disabled
def get_search_simple(parameters, filepath=None, page_size=100, max_results=100000000, api_key=''):
    '''
    Search the BlenderKit elastic endpoint and collect all result pages.

    Parameters
    ----------
    parameters - dict of blenderkit elastic parameters
    filepath - a file to save the results. If None, results are returned
    page_size - page size for retrieved results
    max_results - max results of the search
    api_key - BlenderKit api key

    Returns
    -------
    Returns search results as a list, and optionally saves to filepath
    '''
    headers = utils.get_headers(api_key)
    url = paths.get_api_url() + 'search/'
    requeststring = url + '?query='
    for p in parameters.keys():
        requeststring += f'+{p}:{parameters[p]}'

    requeststring += '&page_size=' + str(page_size)
    bk_logger.debug(requeststring)
    response = rerequests.get(requeststring, headers=headers)  # , params = rparameters)
    search_results = response.json()

    results = []
    results.extend(search_results['results'])
    page_index = 2
    page_count = math.ceil(search_results['count'] / page_size)
    # follow the server's 'next' links until exhausted or max_results reached
    while search_results.get('next') and len(results) < max_results:
        bk_logger.info(f'getting page {page_index} , total pages {page_count}')
        response = rerequests.get(search_results['next'], headers=headers)
        search_results = response.json()
        results.extend(search_results['results'])
        page_index += 1

    if not filepath:
        return results

    with open(filepath, 'w', encoding='utf-8') as s:
        json.dump(results, s, ensure_ascii=False, indent=4)

    bk_logger.info(f'retrieved {len(results)} assets from elastic search')
    # previously nothing was returned when saving to a file; return the list
    # in both cases (backward compatible - callers ignoring it are unaffected)
    return results
def search(category='', get_next=False, author_id=''):
    ''' initialize searching'''
    global search_start_time
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

    search_start_time = time.time()
    scene = bpy.context.scene
    ui_props = scene.blenderkitUI

    # build the query for the active asset type; bail out if the scene
    # property group isn't registered (the add-on may be partially loaded)
    if ui_props.asset_type == 'MODEL':
        if not hasattr(scene, 'blenderkit_models'):
            return
        props = scene.blenderkit_models
        query = build_query_model()

    if ui_props.asset_type == 'SCENE':
        if not hasattr(scene, 'blenderkit_scene'):
            return
        props = scene.blenderkit_scene
        query = build_query_scene()

    if ui_props.asset_type == 'HDR':
        if not hasattr(scene, 'blenderkit_HDR'):
            return
        props = scene.blenderkit_HDR
        query = build_query_HDR()

    if ui_props.asset_type == 'MATERIAL':
        if not hasattr(scene, 'blenderkit_mat'):
            return
        props = scene.blenderkit_mat
        query = build_query_material()

    if ui_props.asset_type == 'TEXTURE':
        if not hasattr(scene, 'blenderkit_tex'):
            return
        # props = scene.blenderkit_tex
        # query = build_query_texture()

    if ui_props.asset_type == 'BRUSH':
        if not hasattr(scene, 'blenderkit_brush'):
            return
        props = scene.blenderkit_brush
        query = build_query_brush()

    # crop long searches
    if query.get('query'):
        if len(query['query']) > 50:
            query['query'] = strip_accents(query['query'])
        if len(query['query']) > 150:
            idx = query['query'].find(' ', 142)
            query['query'] = query['query'][:idx]

    # it's possible get_next was requested more than once.
    if props.is_searching and get_next == True:
        return

    if category != '':
        if utils.profile_is_validator() and user_preferences.categories_fix:
            query['category'] = category
        else:
            query['category_subtree'] = category

    if author_id != '':
        query['author_id'] = author_id
    elif props.own_only:
        # if user searches for [another] author, 'only my assets' is invalid. that's why in elif.
        profile = bpy.context.window_manager.get('bkit profile')
        if profile is not None:
            query['author_id'] = str(profile['user']['id'])

    props.is_searching = True

    params = {
        'scene_uuid': bpy.context.scene.get('uuid', None),
        'addon_version': version_checker.get_addon_version(),
        'api_key': user_preferences.api_key,
        'get_next': get_next,
        'free_first': props.free_only,
    }

    orig_results = bpy.context.window_manager.get(f'bkit {ui_props.asset_type.lower()} search orig', {})
    # ensure it's a copy in dict for what we are passing to thread:
    # (the default {} is a plain dict with no .to_dict, so guard the call)
    if orig_results != {}:
        orig_results = orig_results.to_dict()
    add_search_process(query, params, orig_results)
    tasks_queue.add_task((ui.add_report, ('BlenderKit searching....', 2)))
    props.report = 'BlenderKit searching....'
def search_update(self, context):
    '''Update callback for the search keywords property.

    Parses text pasted from the clipboard ('asset_base_id:... asset_type:...'),
    moves the asset type into the UI and trims it from the keywords.
    '''
    # if self.search_keywords != '':
    ui_props = bpy.context.scene.blenderkitUI
    if ui_props.down_up != 'SEARCH':
        ui_props.down_up = 'SEARCH'

    # here we tweak the input if it comes form the clipboard. we need to get rid of asset type and set it in UI
    sprops = utils.get_search_props()
    instr = 'asset_base_id:'
    atstr = 'asset_type:'
    kwds = sprops.search_keywords
    idi = kwds.find(instr)
    ati = kwds.find(atstr)
    # if the asset type already isn't there it means this update function
    # was triggered by it's last iteration and needs to cancel
    if ati > -1:
        at = kwds[ati:].lower()
        # uncertain length of the remaining string - find as better method to check the presence of asset type
        if at.find('model') > -1:
            ui_props.asset_type = 'MODEL'
        elif at.find('material') > -1:
            ui_props.asset_type = 'MATERIAL'
        elif at.find('brush') > -1:
            ui_props.asset_type = 'BRUSH'
        elif at.find('scene') > -1:
            ui_props.asset_type = 'SCENE'
        elif at.find('hdr') > -1:
            ui_props.asset_type = 'HDR'
        # now we trim the input copypaste by anything extra that is there,
        # this is also a way for this function to recognize that it already has parsed the clipboard
        # the search props can have changed and this needs to transfer the data to the other field
        # this complex behaviour is here for the case where the user needs to paste manually into blender?
        sprops = utils.get_search_props()
        sprops.search_keywords = kwds[:ati].rstrip()
        # return here since writing into search keywords triggers this update function once more.
        return

    search()
def strip_accents(s):
    '''Return *s* with accents removed: NFD-decompose, drop combining marks.'''
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
class SearchOperator(Operator):
    """Run a BlenderKit asset search with the given filters."""
    bl_idname = "view3d.blenderkit_search"
    bl_label = "BlenderKit asset search"
    bl_description = "Search online for assets"

    # NOTE(review): the property declarations below were truncated in this
    # excerpt (missing closers/defaults); reconstructed with plain defaults -
    # confirm extra options against the full file.
    own: BoolProperty(name="own assets only",
                      description="Find all own assets",
                      default=False)

    category: StringProperty(
        name="category",
        description="search only subtree of this category",
        default="",
    )

    author_id: StringProperty(
        name="Author ID",
        description="Author ID - search only assets by this author",
        default="",
    )

    get_next: BoolProperty(name="next page",
                           description="get next page from previous search",
                           default=False)

    keywords: StringProperty(
        name="Keywords",
        description="Keywords",
        default="",
    )

    tooltip: bpy.props.StringProperty(default='Runs search and displays the asset bar at the same time')

    @classmethod
    def description(cls, context, properties):
        return properties.tooltip

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        # TODO ; this should all get transferred to properties of the search operator, so sprops don't have to be fetched here at all.
        sprops = utils.get_search_props()
        if self.author_id != '':
            sprops.search_keywords = ''
        if self.keywords != '':
            sprops.search_keywords = self.keywords

        search(category=self.category, get_next=self.get_next, author_id=self.author_id)
        # Blender operators must return a run-state set
        return {'FINISHED'}
class UrlOperator(Operator):
    """Open a web page in the system browser."""
    bl_idname = "wm.blenderkit_url"
    bl_label = ""
    bl_description = "Search online for assets"
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    tooltip: bpy.props.StringProperty(default='Open a web page')
    url: bpy.props.StringProperty(default='Runs search and displays the asset bar at the same time')

    @classmethod
    def description(cls, context, properties):
        return properties.tooltip

    def execute(self, context):
        bpy.ops.wm.url_open(url=self.url)
        # Blender operators must return a run-state set
        return {'FINISHED'}
]
def register_search():
    '''Register the search operators, the load handler and the update timer.'''
    bpy.app.handlers.load_post.append(scene_load)

    for cls in classes:
        bpy.utils.register_class(cls)

    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if user_preferences.use_timers:
        bpy.app.timers.register(timer_update)

    categories.load_categories()
def unregister_search():
    '''Undo register_search(): remove the handler, classes and timer.'''
    bpy.app.handlers.load_post.remove(scene_load)

    for cls in classes:
        bpy.utils.unregister_class(cls)

    if bpy.app.timers.is_registered(timer_update):
        bpy.app.timers.unregister(timer_update)