Skip to content
Snippets Groups Projects
Commit d00a25d3 authored by Vilém Duha's avatar Vilém Duha
Browse files

BlenderKit: improve search code

Reorganize the search code to be more readable and move some functions out of the search thread class, where they did not belong.
parent 8a1a2e24
Branches
Tags
No related merge requests found
...@@ -238,6 +238,8 @@ def parse_result(r): ...@@ -238,6 +238,8 @@ def parse_result(r):
# utils.p('asset with no files-size') # utils.p('asset with no files-size')
asset_type = r['assetType'] asset_type = r['assetType']
if len(r['files']) > 0:#TODO remove this condition so all assets are parsed. if len(r['files']) > 0:#TODO remove this condition so all assets are parsed.
get_author(r)
r['available_resolutions'] = [] r['available_resolutions'] = []
allthumbs = [] allthumbs = []
durl, tname, small_tname = '', '', '' durl, tname, small_tname = '', '', ''
...@@ -642,9 +644,9 @@ def generate_tooltip(mdata): ...@@ -642,9 +644,9 @@ def generate_tooltip(mdata):
# t += 'uv: %s\n' % mdata['uv'] # t += 'uv: %s\n' % mdata['uv']
# t += '\n' # t += '\n'
if mdata.get('license') == 'cc_zero': if mdata.get('license') == 'cc_zero':
t+= 'license: CC Zero' t+= 'license: CC Zero\n'
else: else:
t+= 'license: Royalty free' t+= 'license: Royalty free\n'
# t = writeblockm(t, mdata, key='license', width=col_w) # t = writeblockm(t, mdata, key='license', width=col_w)
fs = mdata.get('files') fs = mdata.get('files')
...@@ -660,9 +662,9 @@ def generate_tooltip(mdata): ...@@ -660,9 +662,9 @@ def generate_tooltip(mdata):
t += resolutions.replace('_', '.') t += resolutions.replace('_', '.')
if mdata['isFree']: if mdata['isFree']:
t += 'FREE plan\n' t += 'Free plan\n'
else: else:
t += 'FULL plan\n' t += 'Full plan\n'
else: else:
if fs: if fs:
for f in fs: for f in fs:
...@@ -899,16 +901,62 @@ def get_profile(): ...@@ -899,16 +901,62 @@ def get_profile():
thread.start() thread.start()
return a return a
def query_to_url(query=None, params=None):
    """Build the full BlenderKit search API URL for the given query.

    Args:
        query: dict of search terms. The special key 'query' holds free-text
            keywords; every other key becomes a `key:value` filter token.
        params: dict of request parameters. Reads 'free_first' and
            'addon_version' (required by this code) and optionally 'scene_uuid'.

    Returns:
        str: the complete search URL (endpoint + encoded request string).
    """
    # Avoid the mutable-default-argument pitfall: fresh dicts per call.
    if query is None:
        query = {}
    if params is None:
        params = {}

    # build a new request
    url = paths.get_api_url() + 'search/'

    # build request manually
    # TODO use real queries
    parts = ['?query=']
    if query.get('query') not in ('', None):
        parts.append(query['query'].lower())
    # Every non-keyword entry becomes a '+key:value' filter token.
    for key in query:
        if key != 'query':
            parts.append('+' + key + ':' + str(query[key]).lower())

    # result ordering: _score - relevance, score - BlenderKit score
    order = []
    if params['free_first']:
        order = ['-is_free', ]
    if query.get('query') is None and query.get('category_subtree') is None:
        # assumes no keywords and no category, thus an empty search that is triggered on start.
        # orders by last core file upload
        if query.get('verification_status') == 'uploaded':
            # for validators, sort uploaded from oldest
            order.append('created')
        else:
            order.append('-last_upload')
    elif query.get('author_id') is not None and utils.profile_is_validator():
        order.append('-created')
    else:
        if query.get('category_subtree') is not None:
            order.append('-score,_score')
        else:
            order.append('_score')

    parts.append('+order:' + ','.join(order))
    parts.append('&addon_version=%s' % params['addon_version'])
    if params.get('scene_uuid') is not None:
        parts.append('&scene_uuid=%s' % params['scene_uuid'])
    # print('params', params)
    return url + ''.join(parts)
class Searcher(threading.Thread): class Searcher(threading.Thread):
query = None query = None
def __init__(self, query, params, orig_result): def __init__(self, query, params, orig_result, tempdir = '', headers = None, urlquery = ''):
super(Searcher, self).__init__() super(Searcher, self).__init__()
self.query = query self.query = query
self.params = params self.params = params
self._stop_event = threading.Event() self._stop_event = threading.Event()
self.result = orig_result self.result = orig_result
self.tempdir = tempdir
self.headers = headers
self.urlquery = urlquery
def stop(self): def stop(self):
self._stop_event.set() self._stop_event.set()
...@@ -916,52 +964,6 @@ class Searcher(threading.Thread): ...@@ -916,52 +964,6 @@ class Searcher(threading.Thread):
def stopped(self): def stopped(self):
return self._stop_event.is_set() return self._stop_event.is_set()
def query_to_url(self):
query = self.query
params = self.params
# build a new request
url = paths.get_api_url() + 'search/'
# build request manually
# TODO use real queries
requeststring = '?query='
#
if query.get('query') not in ('', None):
requeststring += query['query'].lower()
for i, q in enumerate(query):
if q != 'query':
requeststring += '+'
requeststring += q + ':' + str(query[q]).lower()
# result ordering: _score - relevance, score - BlenderKit score
order = []
if params['free_first']:
order = ['-is_free', ]
if query.get('query') is None and query.get('category_subtree') == None:
# assumes no keywords and no category, thus an empty search that is triggered on start.
# orders by last core file upload
if query.get('verification_status') == 'uploaded':
# for validators, sort uploaded from oldest
order.append('created')
else:
order.append('-last_upload')
elif query.get('author_id') is not None and utils.profile_is_validator():
order.append('-created')
else:
if query.get('category_subtree') is not None:
order.append('-score,_score')
else:
order.append('_score')
requeststring += '+order:' + ','.join(order)
requeststring += '&addon_version=%s' % params['addon_version']
if params.get('scene_uuid') is not None:
requeststring += '&scene_uuid=%s' % params['scene_uuid']
# print('params', params)
urlquery = url + requeststring
return urlquery
def run(self): def run(self):
maxthreads = 50 maxthreads = 50
query = self.query query = self.query
...@@ -970,22 +972,16 @@ class Searcher(threading.Thread): ...@@ -970,22 +972,16 @@ class Searcher(threading.Thread):
t = time.time() t = time.time()
mt('search thread started') mt('search thread started')
tempdir = paths.get_temp_dir('%s_search' % query['asset_type']) # tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
# json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type']) # json_filepath = os.path.join(tempdir, '%s_searchresult.json' % query['asset_type'])
headers = utils.get_headers(params['api_key'])
rdata = {} rdata = {}
rdata['results'] = [] rdata['results'] = []
if params['get_next']:
urlquery = self.result['next']
if not params['get_next']:
urlquery = self.query_to_url()
try: try:
utils.p(urlquery) utils.p(self.urlquery)
r = rerequests.get(urlquery, headers=headers) # , params = rparameters) r = rerequests.get(self.urlquery, headers=self.headers) # , params = rparameters)
# print(r.url) # print(r.url)
reports = '' reports = ''
# utils.p(r.text) # utils.p(r.text)
...@@ -1024,8 +1020,6 @@ class Searcher(threading.Thread): ...@@ -1024,8 +1020,6 @@ class Searcher(threading.Thread):
# END OF PARSING # END OF PARSING
for d in rdata.get('results', []): for d in rdata.get('results', []):
get_author(d)
for f in d['files']: for f in d['files']:
# TODO move validation of published assets to server, too manmy checks here. # TODO move validation of published assets to server, too manmy checks here.
if f['fileType'] == 'thumbnail' and f['fileThumbnail'] != None and f['fileThumbnailLarge'] != None: if f['fileType'] == 'thumbnail' and f['fileThumbnail'] != None and f['fileThumbnailLarge'] != None:
...@@ -1038,11 +1032,11 @@ class Searcher(threading.Thread): ...@@ -1038,11 +1032,11 @@ class Searcher(threading.Thread):
thumb_full_urls.append(f['fileThumbnailLarge']) thumb_full_urls.append(f['fileThumbnailLarge'])
imgname = paths.extract_filename_from_url(f['fileThumbnail']) imgname = paths.extract_filename_from_url(f['fileThumbnail'])
imgpath = os.path.join(tempdir, imgname) imgpath = os.path.join(self.tempdir, imgname)
thumb_small_filepaths.append(imgpath) thumb_small_filepaths.append(imgpath)
imgname = paths.extract_filename_from_url(f['fileThumbnailLarge']) imgname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
imgpath = os.path.join(tempdir, imgname) imgpath = os.path.join(self.tempdir, imgname)
thumb_full_filepaths.append(imgpath) thumb_full_filepaths.append(imgpath)
sml_thbs = zip(thumb_small_filepaths, thumb_small_urls) sml_thbs = zip(thumb_small_filepaths, thumb_small_urls)
...@@ -1291,9 +1285,16 @@ def add_search_process(query, params, orig_result): ...@@ -1291,9 +1285,16 @@ def add_search_process(query, params, orig_result):
old_thread = search_threads.pop(0) old_thread = search_threads.pop(0)
old_thread[0].stop() old_thread[0].stop()
# TODO CARE HERE FOR ALSO KILLING THE Thumbnail THREADS.? # TODO CARE HERE FOR ALSO KILLING THE Thumbnail THREADS.?
# AT LEAST NOW SEARCH DONE FIRST WON'T REWRITE AN OLDER ONE # AT LEAST NOW SEARCH DONE FIRST WON'T REWRITE AN NEWER ONE
tempdir = paths.get_temp_dir('%s_search' % query['asset_type']) tempdir = paths.get_temp_dir('%s_search' % query['asset_type'])
thread = Searcher(query, params, orig_result) headers = utils.get_headers(params['api_key'])
if params['get_next']:
urlquery = orig_result['next']
if not params['get_next']:
urlquery = query_to_url(query, params)
thread = Searcher(query, params, orig_result, tempdir = tempdir, headers = headers, urlquery = urlquery)
thread.start() thread.start()
search_threads.append([thread, tempdir, query['asset_type'], {}]) # 4th field is for results search_threads.append([thread, tempdir, query['asset_type'], {}]) # 4th field is for results
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment