diff --git a/.arcconfig b/.arcconfig
new file mode 100644
index 0000000000000000000000000000000000000000..16cc29d9c979aef5303d279bac7dc22b1ebd8103
--- /dev/null
+++ b/.arcconfig
@@ -0,0 +1,6 @@
+{
+	"project_id" : "Blender Dev Tools",
+	"conduit_uri" : "https://developer.blender.org/",
+	"git.default-relative-commit" : "origin/master",
+	"arc.land.update.default" : "rebase"
+}
\ No newline at end of file
diff --git a/check_blender_release/check_module_enabled.py b/check_blender_release/check_module_enabled.py
new file mode 100644
index 0000000000000000000000000000000000000000..158a5386a9ddb7d9848133a58a7fe85c40dbae18
--- /dev/null
+++ b/check_blender_release/check_module_enabled.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+import unittest
+
+from check_utils import (sliceCommandLineArguments,
+                         SceiptUnitTesting)
+
+
+class UnitTesting(SceiptUnitTesting):
+    def test_modulesEnabled(self):
+        self.checkScript("modules_enabled")
+
+
+def main():
+    # Slice command line arguments by '--'
+    unittest_args, parser_args = sliceCommandLineArguments()
+    # Construct and run unit tests.
+    unittest.main(argv=unittest_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/check_blender_release/check_module_numpy.py b/check_blender_release/check_module_numpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..36c90006807531cdb44b0ca5441ed042a6b0511f
--- /dev/null
+++ b/check_blender_release/check_module_numpy.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+import unittest
+
+from check_utils import (sliceCommandLineArguments,
+                         SceiptUnitTesting)
+
+
+class UnitTesting(SceiptUnitTesting):
+    def test_numpyImports(self):
+        self.checkScript("numpy_import")
+
+    def test_numpyBasicOperation(self):
+        self.checkScript("numpy_basic_operation")
+
+
+def main():
+    # Slice command line arguments by '--'
+    unittest_args, parser_args = sliceCommandLineArguments()
+    # Construct and run unit tests.
+    unittest.main(argv=unittest_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/check_blender_release/check_module_requests.py b/check_blender_release/check_module_requests.py
new file mode 100644
index 0000000000000000000000000000000000000000..af8f6e3b8e43f03e17fda167d6a3f974a7445e8f
--- /dev/null
+++ b/check_blender_release/check_module_requests.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+import unittest
+
+from check_utils import (sliceCommandLineArguments,
+                         SceiptUnitTesting)
+
+
+class UnitTesting(SceiptUnitTesting):
+    def test_requestsImports(self):
+        self.checkScript("requests_import")
+
+    def test_requestsBasicAccess(self):
+        self.checkScript("requests_basic_access")
+
+
+def main():
+    # Slice command line arguments by '--'
+    unittest_args, parser_args = sliceCommandLineArguments()
+    # Construct and run unit tests.
+    unittest.main(argv=unittest_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/check_blender_release/check_release.py b/check_blender_release/check_release.py
new file mode 100755
index 0000000000000000000000000000000000000000..6275f79b0833160135ccc2f7e1b8fad63c2669fb
--- /dev/null
+++ b/check_blender_release/check_release.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+# Usage: ./check_release.py -- ../path/to/release/folder
+
+
+import os
+import sys
+import unittest
+
+import check_module_enabled
+import check_module_numpy
+import check_module_requests
+import check_static_binaries
+from check_utils import sliceCommandLineArguments
+
+
+def load_tests(loader, standard_tests, pattern):
+    standard_tests.addTests(loader.loadTestsFromTestCase(
+            check_module_enabled.UnitTesting))
+    standard_tests.addTests(loader.loadTestsFromTestCase(
+            check_module_numpy.UnitTesting))
+    standard_tests.addTests(loader.loadTestsFromTestCase(
+            check_module_requests.UnitTesting))
+    standard_tests.addTests(loader.loadTestsFromTestCase(
+            check_static_binaries.UnitTesting))
+    return standard_tests
+
+
+def main():
+    # Slice command line arguments by '--'
+    unittest_args, parser_args = sliceCommandLineArguments()
+    # Construct and run unit tests.
+    unittest.main(argv=unittest_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/check_blender_release/check_static_binaries.py b/check_blender_release/check_static_binaries.py
new file mode 100644
index 0000000000000000000000000000000000000000..198796bce1863776925bdd2d7aff220f4d4d57c9
--- /dev/null
+++ b/check_blender_release/check_static_binaries.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+import os
+from pathlib import Path
+import re
+import subprocess
+import sys
+import unittest
+
+from check_utils import (sliceCommandLineArguments,
+                         parseArguments)
+
+
+ALLOWED_LIBS = (
+    # Core C/C++ libraries
+    "ld-linux.so",
+    "ld-linux-x86-64.so",
+    "libc.so",
+    "libm.so",
+    "libstdc++.so",
+    "libdl.so",
+    "libpthread.so",
+    "libgcc_s.so",
+    "librt.so",
+    "libutil.so",
+
+    # X11 libraries we don't link statically,
+    "libX11.so",
+    "libXext.so",
+    "libXrender.so",
+    "libXxf86vm.so",
+    "libXi.so",
+
+    # OpenGL libraries.
+    "libGL.so",
+    "libGLU.so",
+
+    # Own dependencies we don't link statically.
+    "libfreetype.so",
+)
+
+IGNORE_FILES = ("blender-softwaregl", )
+IGNORE_EXTENSION = (".sh", ".py", )
+
+
+# Library dependencies.
+
+def getNeededLibrariesLDD(binary_filepath):
+    """
+    This function uses ldd to collect libraries which binary depends on.
+
+    Not totally safe since ldd might actually execute the binary to get its
+    symbols and will also collect indirect dependencies which might not be
+    desired.
+
+    Has advantage of telling that some dependency library is not found.
+    """
+    ldd_command = ("ldd", str(binary_filepath))
+    ldd_output = subprocess.check_output(ldd_command, stderr=subprocess.STDOUT)
+    lines = ldd_output.decode().split("\n")
+    libraries = []
+    for line in lines:
+        line = line.strip()
+        if not line:
+            continue
+        lib_name = line.split("=>")[0]
+        lib_name = lib_name.split(" (")[0].strip()
+        lib_file_name = os.path.basename(lib_name)
+        libraries.append(lib_file_name)
+    return libraries
+
+
+def getNeededLibrariesOBJDUMP(binary_filepath):
+    """
+    This function uses objdump to get direct dependencies of a given binary.
+
+    Totally safe, but will require manual check over libraries which are not
+    found on the system.
+    """
+    objdump_command = ("objdump", "-p", str(binary_filepath))
+    objdump_output = subprocess.check_output(objdump_command,
+                                             stderr=subprocess.STDOUT)
+    lines = objdump_output.decode().split("\n")
+    libraries = []
+    for line in lines:
+        line = line.strip()
+        if not line:
+            continue
+        if not line.startswith("NEEDED"):
+            continue
+        lib_name = line[6:].strip()
+        libraries.append(lib_name)
+    return libraries
+
+
+def getNeededLibraries(binary_filepath):
+    """
+    Get all libraries given binary depends on.
+    """
+    if False:
+        return getNeededLibrariesLDD(binary_filepath)
+    else:
+        return getNeededLibrariesOBJDUMP(binary_filepath)
+
+
+def stripLibraryABI(lib_name):
+    """
+    Strip ABI suffix from .so file
+
+    Example: libexample.so.1.0 => libexample.so
+    """
+    lib_name_no_abi = lib_name
+    # TODO(sergey): Optimize this!
+    while True:
+        no_abi = re.sub(r"\.[0-9]+$", "", lib_name_no_abi)
+        if lib_name_no_abi == no_abi:
+            break
+        lib_name_no_abi = no_abi
+    return lib_name_no_abi
+
+
+class UnitTesting(unittest.TestCase):
+    def checkBinary(self, binary_filepath):
+        """
+        Check that the given binary file is properly static and self-sufficient.
+        """
+
+        libraries = getNeededLibraries(binary_filepath)
+        for lib_name in libraries:
+            lib_name_no_abi = stripLibraryABI(lib_name)
+            self.assertTrue(lib_name_no_abi in ALLOWED_LIBS,
+                            "Error detected in {}: library used {}" . format(
+                                binary_filepath, lib_name))
+
+    def checkDirectory(self, directory):
+        """
+        Recursively traverse directory and check every binary in it.
+        """
+
+        for path in Path(directory).rglob("*"):
+            # Ignore any checks on directory.
+            if path.is_dir():
+                continue
+            # Ignore script files.
+            if path.name in IGNORE_FILES:
+                continue
+            if path.suffix in IGNORE_EXTENSION:
+                continue
+            # Check any executable binary,
+            if path.stat().st_mode & 0o111 != 0:
+                self.checkBinary(path)
+            # Check all dynamic libraries.
+            elif path.suffix == ".so":
+                self.checkBinary(path)
+
+    def test_directoryIsStatic(self):
+        # Parse arguments which are not handled by unit testing framework.
+        args = parseArguments()
+        # Do some sanity checks first.
+        self.assertTrue(os.path.exists(args.directory),
+                        "Given directory does not exist: {}" .
+                        format(args.directory))
+        self.assertTrue(os.path.isdir(args.directory),
+                        "Given path is not a directory: {}" .
+                        format(args.directory))
+        # Perform actual test,
+        self.checkDirectory(args.directory)
+
+
+def main():
+    # Slice command line arguments by '--'
+    unittest_args, parser_args = sliceCommandLineArguments()
+    # Construct and run unit tests.
+    unittest.main(argv=unittest_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/check_blender_release/check_utils.py b/check_blender_release/check_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..42679cd16ec420b530b6b7f04b9c6f2a2e9e9b7c
--- /dev/null
+++ b/check_blender_release/check_utils.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Sergey Sharybin
+#
+# #**** END GPL LICENSE BLOCK #****
+
+# <pep8 compliant>
+
+
+import unittest
+
+
+def sliceCommandLineArguments():
+    """
+    Slice command line arguments by -- argument.
+    """
+
+    import sys
+
+    try:
+        double_shasl_index = sys.argv.index("--")
+    except ValueError:
+        unittest_args = sys.argv[:]
+        parser_args = []
+    else:
+        unittest_args = sys.argv[:double_shasl_index]
+        parser_args = sys.argv[double_shasl_index + 1:]
+
+    return unittest_args, parser_args
+
+
+def parseArguments():
+    import argparse
+
+    # Construct argument parser.
+    parser = argparse.ArgumentParser(description="Static binary checker")
+    parser.add_argument('directory', help='Directories to check')
+    # Parse arguments which are not handled by unit testing framework.
+    unittest_args, parser_args = sliceCommandLineArguments()
+    args = parser.parse_args(args=parser_args)
+    # TODO(sergey): Run some checks here?
+    return args
+
+
+def runScriptInBlender(blender_directory, script):
+    """
+    Run given script inside Blender and report whether it exited with code zero
+    """
+
+    import os
+    import subprocess
+
+    blender = os.path.join(blender_directory, "blender")
+    python = os.path.join(os.path.dirname(__file__), "scripts", script) + ".py"
+
+    command = (blender,
+               "-b",
+               "--factory-startup",
+               "--python-exit-code", "1",
+               "--python", python)
+
+    process = subprocess.Popen(command,
+                               shell=False,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    output, error = process.communicate()
+    return process.returncode == 0
+
+
+class SceiptUnitTesting(unittest.TestCase):
+    def checkScript(self, script):
+        # Parse arguments which are not handled by unit testing framework.
+        args = parseArguments()
+        # Perform actual test,
+        self.assertTrue(runScriptInBlender(args.directory, script),
+                        "Failed to run script {}" . format(script))
diff --git a/check_blender_release/scripts/modules_enabled.py b/check_blender_release/scripts/modules_enabled.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a83ec6b22643c633d1556fab6ec809794846fa4
--- /dev/null
+++ b/check_blender_release/scripts/modules_enabled.py
@@ -0,0 +1,5 @@
+import _sha1
+import _sha256
+import _md5
+import ssl
+import multiprocessing.synchronize
diff --git a/check_blender_release/scripts/numpy_basic_operation.py b/check_blender_release/scripts/numpy_basic_operation.py
new file mode 100644
index 0000000000000000000000000000000000000000..54c581bbf6551f0404b3b53e9552d9f60ec8eb44
--- /dev/null
+++ b/check_blender_release/scripts/numpy_basic_operation.py
@@ -0,0 +1,6 @@
+# This code tests bug reported in T50703
+
+import numpy
+
+a = numpy.array([[3, 2, 0], [3, 1, 0]], dtype=numpy.int32)
+a[0]
diff --git a/check_blender_release/scripts/numpy_import.py b/check_blender_release/scripts/numpy_import.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2e5936fe3786bc72d6bf8f7278f733571c09e48
--- /dev/null
+++ b/check_blender_release/scripts/numpy_import.py
@@ -0,0 +1 @@
+import numpy
diff --git a/check_blender_release/scripts/requests_basic_access.py b/check_blender_release/scripts/requests_basic_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..09d6f61796cccdaba15a17b39f2dc1923b623bca
--- /dev/null
+++ b/check_blender_release/scripts/requests_basic_access.py
@@ -0,0 +1,8 @@
+import requests
+
+r = requests.get("https://blender.org/", verify=True)
+
+assert(r.status_code == 200)
+assert(r.reason == "OK")
+assert(True if r.ok else False)
+assert(len(r.content) > 256)
diff --git a/check_blender_release/scripts/requests_import.py b/check_blender_release/scripts/requests_import.py
new file mode 100644
index 0000000000000000000000000000000000000000..20b15530dd259d47a077bafde4e99ff35e656c80
--- /dev/null
+++ b/check_blender_release/scripts/requests_import.py
@@ -0,0 +1 @@
+import requests
diff --git a/check_source/check_descriptions.py b/check_source/check_descriptions.py
index 327d2c989426b01c0aa9bfcd438fe5ceaa96be9a..e384872521fceedaee2aadfd0a2222f8ec26c966 100644
--- a/check_source/check_descriptions.py
+++ b/check_source/check_descriptions.py
@@ -97,12 +97,13 @@ DUPLICATE_WHITELIST = (
     ('OBJECT_OT_bake', 'OBJECT_OT_bake_image'),
     ('OBJECT_OT_duplicate_move', 'OBJECT_OT_duplicate_move_linked'),
     ('WM_OT_context_cycle_enum', 'WM_OT_context_toggle', 'WM_OT_context_toggle_enum'),
-    ('WM_OT_context_set_boolean', 'WM_OT_context_set_enum', 'WM_OT_context_set_float', 'WM_OT_context_set_int', 'WM_OT_context_set_string', 'WM_OT_context_set_value'),
-    )
+    ('WM_OT_context_set_boolean', 'WM_OT_context_set_enum', 'WM_OT_context_set_float',
+     'WM_OT_context_set_int', 'WM_OT_context_set_string', 'WM_OT_context_set_value'),
+)
 
 DUPLICATE_IGNORE = {
     "",
-    }
+}
 
 
 def check_duplicates():
diff --git a/check_source/check_spelling.py b/check_source/check_spelling.py
index 8a047c90c953b6ade37d2d35d494f1719b488c8a..20700a210671f05f5bee065bb40ef7732f924c6c 100755
--- a/check_source/check_spelling.py
+++ b/check_source/check_spelling.py
@@ -47,9 +47,10 @@ else:
 import enchant
 dict_spelling = enchant.Dict("en_US")
 
-from check_spelling_c_config import (dict_custom,
-                                     dict_ignore,
-                                     )
+from check_spelling_c_config import (
+    dict_custom,
+    dict_ignore,
+)
 
 
 def words_from_text(text):
@@ -118,11 +119,12 @@ def words_from_text(text):
 
 
 class Comment:
-    __slots__ = ("file",
-                 "text",
-                 "line",
-                 "type",
-                 )
+    __slots__ = (
+        "file",
+        "text",
+        "line",
+        "type",
+    )
 
     def __init__(self, file, text, line, type):
         self.file = file
@@ -183,10 +185,10 @@ def extract_c_comments(filepath):
         r"\param[in,out]",
         r"\param",
         r"\page",
-        )
+    )
     SKIP_COMMENTS = (
         "BEGIN GPL LICENSE BLOCK",
-        )
+    )
 
     # http://doc.qt.nokia.com/qtcreator-2.4/creator-task-lists.html#task-list-file-format
     # file\tline\ttype\tdescription
@@ -330,7 +332,7 @@ def spell_check_comments_recursive(dirpath):
     def source_list(path, filename_check=None):
         for dirpath, dirnames, filenames in os.walk(path):
 
-            # skip '.svn'
+            # skip '.git'
             if dirpath.startswith("."):
                 continue
 
@@ -341,7 +343,20 @@ def spell_check_comments_recursive(dirpath):
 
     def is_source(filename):
         ext = splitext(filename)[1]
-        return (ext in {".c", ".inl", ".cpp", ".cxx", ".hpp", ".hxx", ".h", ".hh", ".m", ".mm", ".osl", ".py"})
+        return (ext in {
+            ".c",
+            ".inl",
+            ".cpp",
+            ".cxx",
+            ".hpp",
+            ".hxx",
+            ".h",
+            ".hh",
+            ".m",
+            ".mm",
+            ".osl",
+            ".py",
+        })
 
     for filepath in source_list(dirpath, is_source):
         spell_check_comments(filepath)
diff --git a/check_source/check_spelling_c_config.py b/check_source/check_spelling_c_config.py
index 844dc9e39257c623cc37932944318a00d2969476..8a86a54debc49e425305bad3374347a3ca978948 100644
--- a/check_source/check_spelling_c_config.py
+++ b/check_source/check_spelling_c_config.py
@@ -23,6 +23,7 @@
 # correct spelling but ignore
 dict_custom = {
     "adjoint", "adjugate",
+    "allocator",
     "atomicity",
     "boolean",
     "decrement",
@@ -117,6 +118,7 @@ dict_custom = {
     "ack",
     "amiga",
     "bzflag",
+    "ffmpeg",
     "freebsd",
     "irix",
     "kde",
diff --git a/check_source/check_style_c.py b/check_source/check_style_c.py
index 4097c0eaa03c93f2454a15665021ee87c6f5ba49..f115ce46332021610935f7948b814aac14bb4747 100755
--- a/check_source/check_style_c.py
+++ b/check_source/check_style_c.py
@@ -79,7 +79,7 @@ class TokStore:
         "text",
         "line",
         "flag",
-        )
+    )
 
     def __init__(self, type, text, line):
         self.type = type
@@ -349,7 +349,7 @@ def extract_cast(index):
 
     return (i_start, i_end)
 
-A = print
+
 def tk_range_find_by_type(index_start, index_end, type_, filter_tk=None):
     if index_start < index_end:
         for i in range(index_start, index_end + 1):
@@ -396,14 +396,14 @@ def blender_check_kw_if(index_kw_start, index_kw, index_kw_end):
         # check for: if ()
         #            {
         # note: if the if statement is multi-line we allow it
-        if     ((tokens[index_kw].line == tokens[index_kw_end].line) and
+        if ((tokens[index_kw].line == tokens[index_kw_end].line) and
                 (tokens[index_kw].line == tokens[index_next].line - 1)):
 
-            if     ((tokens[index_kw].line + 1 != tokens[index_next].line) and
-                    (tk_range_find_by_type(index_kw + 1, index_next - 1, Token.Comment.Preproc,
-                            filter_tk=lambda tk: tk.text in {
-                                "if", "ifdef", "ifndef", "else", "elif", "endif"}) != -1)
-                    ):
+            if ((tokens[index_kw].line + 1 != tokens[index_next].line) and
+                (tk_range_find_by_type(index_kw + 1, index_next - 1, Token.Comment.Preproc,
+                                       filter_tk=lambda tk: tk.text in {
+                                           "if", "ifdef", "ifndef", "else", "elif", "endif"}) != -1)):
+
                 # allow this to go unnoticed
                 pass
             else:
@@ -430,12 +430,12 @@ def blender_check_kw_if(index_kw_start, index_kw, index_kw_end):
         # check for correct single line use & indentation
         if not (tokens[index_next].type == Token.Punctuation and tokens[index_next].text == ";"):
             if tokens[index_next].type == Token.Keyword and tokens[index_next].text in {"if", "while", "for"}:
-                    ws_kw = extract_ws_indent(index_kw)
-                    ws_end = extract_ws_indent(index_next)
-                    if len(ws_kw) + 1 != len(ws_end):
-                        warning("E200", "bad single line indent '%s (...) {'" %
-                                tokens[index_kw].text, index_kw, index_next)
-                    del ws_kw, ws_end
+                ws_kw = extract_ws_indent(index_kw)
+                ws_end = extract_ws_indent(index_next)
+                if len(ws_kw) + 1 != len(ws_end):
+                    warning("E200", "bad single line indent '%s (...) {'" %
+                            tokens[index_kw].text, index_kw, index_next)
+                del ws_kw, ws_end
             else:
                 index_end = tk_advance_to_token(index_next, 1, ";", Token.Punctuation)
                 if tokens[index_kw].line != tokens[index_end].line:
@@ -501,7 +501,7 @@ def blender_check_kw_if(index_kw_start, index_kw, index_kw_end):
                     # use startswith because there are function calls within 'if' checks sometimes.
                     ws_indent_test = extract_to_linestart(i + 1)
                     # print("indent: %r   %s" % (ws_indent_test, tokens[i].text))
-                    #if ws_indent_test != ws_indent:
+                    # if ws_indent_test != ws_indent:
 
                     if ws_indent_test.startswith(ws_indent):
                         pass
@@ -540,7 +540,7 @@ def blender_check_kw_else(index_kw):
     #
     # check for this case since this is needed sometimes:
     # else     { a = 1; }
-    if     ((tokens[index_kw].line == tokens[i_next].line) and
+    if ((tokens[index_kw].line == tokens[i_next].line) and
             (tokens[index_kw + 1].type == Token.Text) and
             (len(tokens[index_kw + 1].text) > 1) and
             (tokens[index_kw + 1].text.isspace())):
@@ -580,24 +580,23 @@ def blender_check_kw_else(index_kw):
             #     else
             # #endif
             #     if
-            if     ((tokens[index_kw].line + 1 != tokens[i_next].line) and
-                    any(True for i in range(index_kw + 1, i_next)
-                        if (tokens[i].type == Token.Comment.Preproc and
-                            tokens[i].text.lstrip("# \t").startswith((
-                                "if", "ifdef", "ifndef",
-                                "else", "elif", "endif",
-                                ))
-                            )
+            if ((tokens[index_kw].line + 1 != tokens[i_next].line) and
+                any(True for i in range(index_kw + 1, i_next)
+                    if (tokens[i].type == Token.Comment.Preproc and
+                        tokens[i].text.lstrip("# \t").startswith((
+                            "if", "ifdef", "ifndef",
+                            "else", "elif", "endif",
+                        ))
                         )
-                    ):
+                    )):
+
                 # allow this to go unnoticed
                 pass
 
-            if     ((tokens[index_kw].line + 1 != tokens[i_next].line) and
-                    (tk_range_find_by_type(index_kw + 1, i_next - 1, Token.Comment.Preproc,
-                            filter_tk=lambda tk: tk.text in {
-                                "if", "ifdef", "ifndef", "else", "elif", "endif", }) != -1)
-                    ):
+            if ((tokens[index_kw].line + 1 != tokens[i_next].line) and
+                (tk_range_find_by_type(index_kw + 1, i_next - 1, Token.Comment.Preproc,
+                                       filter_tk=lambda tk: tk.text in {
+                                           "if", "ifdef", "ifndef", "else", "elif", "endif", }) != -1)):
                 # allow this to go unnoticed
                 pass
             else:
@@ -638,7 +637,7 @@ def blender_check_kw_switch(index_kw_start, index_kw, index_kw_end):
                 "return": ws_switch_indent + "\t\t",
                 "continue": ws_switch_indent + "\t\t",
                 "goto": ws_switch_indent + "\t\t",
-                }
+            }
 
             index_final = tk_match_backet(index_next)
 
@@ -697,12 +696,12 @@ def blender_check_kw_switch(index_kw_start, index_kw, index_kw_end):
                             if tokens[i].text in {
                                     "/* fall-through */", "/* fall through */",
                                     "/* pass-through */", "/* pass through */",
-                                    }:
+                            }:
 
                                 ok = True
                                 break
                             else:
-                                #~ print("Commment '%s'" % tokens[i].text)
+                                # ~ print("Commment '%s'" % tokens[i].text)
                                 pass
 
                         elif tokens[i].type == Token.Keyword:
@@ -715,19 +714,20 @@ def blender_check_kw_switch(index_kw_start, index_kw, index_kw_end):
                                     break
                                 else:
                                     ws_other_indent = extract_to_linestart(i)
-                                    ws_other_indent = ws_other_indent[:len(ws_other_indent) - len(ws_other_indent.lstrip())]
+                                    ws_other_indent = ws_other_indent[
+                                        :len(ws_other_indent) - len(ws_other_indent.lstrip())]
                                     ws_test_other = ws_test[tokens[i].text]
                                     if ws_other_indent == ws_test_other:
                                         ok = True
                                         break
                                     else:
                                         pass
-                                        #~ print("indent mismatch...")
-                                        #~ print("'%s'" % ws_other_indent)
-                                        #~ print("'%s'" % ws_test_other)
+                                        # ~ print("indent mismatch...")
+                                        # ~ print("'%s'" % ws_other_indent)
+                                        # ~ print("'%s'" % ws_test_other)
                     if not ok:
                         warning("E118", "case/default statement has no break", i_case, i_end)
-                        #~ print(tk_range_to_str(i_case - 1, i_end - 1, expand_tabs=True))
+                        # ~ print(tk_range_to_str(i_case - 1, i_end - 1, expand_tabs=True))
         else:
             warning("E119", "switch isn't the first token in the line", index_kw_start, index_kw_end)
     else:
@@ -798,15 +798,15 @@ def blender_check_operator(index_start, index_end, op_text, is_cpp):
             # detect (-a) vs (a - b)
             index_prev = index_start - 1
             if (tokens[index_prev].text.isspace() and
-                tokens[index_prev - 1].flag & IS_CAST):
+                    tokens[index_prev - 1].flag & IS_CAST):
                 index_prev -= 1
             if tokens[index_prev].flag & IS_CAST:
                 index_prev = tk_advance_flag(index_prev, -1, IS_CAST)
 
-            if     (not tokens[index_prev].text.isspace() and
+            if (not tokens[index_prev].text.isspace() and
                     tokens[index_prev].text not in {"[", "(", "{"}):
                 warning("E130", "no space before operator '%s'" % op_text, index_start, index_end)
-            if     (not tokens[index_end + 1].text.isspace() and
+            if (not tokens[index_end + 1].text.isspace() and
                     tokens[index_end + 1].text not in {"]", ")", "}"}):
                 # TODO, needs work to be useful
                 # warning("E130", "no space after operator '%s'" % op_text, index_start, index_end)
@@ -833,20 +833,20 @@ def blender_check_operator(index_start, index_end, op_text, is_cpp):
 
             index_prev = index_start - 1
             if (tokens[index_prev].text.isspace() and
-                tokens[index_prev - 1].flag & IS_CAST):
+                    tokens[index_prev - 1].flag & IS_CAST):
                 index_prev -= 1
             if tokens[index_prev].flag & IS_CAST:
                 index_prev = tk_advance_flag(index_prev, -1, IS_CAST)
 
             # This check could be improved, its a bit fuzzy
-            if     ((tokens[index_start - 1].flag & IS_CAST) or
+            if ((tokens[index_start - 1].flag & IS_CAST) or
                     (tokens[index_start + 1].flag & IS_CAST)):
                 # allow:
                 #     a = *(int *)b;
                 # and:
                 #     b = (int *)*b;
                 pass
-            elif   ((tokens[index_start - 1].type in Token.Number) or
+            elif ((tokens[index_start - 1].type in Token.Number) or
                     (tokens[index_start + 1].type in Token.Number)):
                 warning("E130", "no space around operator '%s'" % op_text, index_start, index_end)
             elif not (tokens[index_start - 1].text.isspace() or tokens[index_start - 1].text in {"(", "[", "{"}):
@@ -915,7 +915,7 @@ def blender_check_operator(index_start, index_end, op_text, is_cpp):
 
     if len(op_text) > 1:
         if op_text[0] == "*" and op_text[-1] == "*":
-            if     ((not tokens[index_start - 1].text.isspace()) and
+            if ((not tokens[index_start - 1].text.isspace()) and
                     (not tokens[index_start - 1].type == Token.Punctuation)):
                 warning("E130", "no space before pointer operator '%s'" % op_text, index_start, index_end)
             if tokens[index_end + 1].text.isspace():
@@ -1121,8 +1121,8 @@ def quick_check_indentation(lines):
             elif (':' in ls and l[0] != '\t'):
                 skip = True
             # /* comment */
-            #~ elif ls.startswith("/*") and ls.endswith("*/"):
-            #~     skip = True
+            # ~ elif ls.startswith("/*") and ls.endswith("*/"):
+            # ~     skip = True
             # /* some comment...
             elif ls.startswith("/*"):
                 skip = True
@@ -1292,7 +1292,8 @@ def scan_source(fp, code, args):
                 blender_check_brace_indent(i)
 
                 # check previous character is either a '{' or whitespace.
-                if (tokens[i - 1].line == tok.line) and not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{"):
+                if ((tokens[i - 1].line == tok.line) and
+                        not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{")):
                     warning("E150", "no space before '{'", i, i)
 
                 blender_check_function_definition(i)
@@ -1332,20 +1333,20 @@ def scan_source(fp, code, args):
         else:
             col += len(tok.text.expandtabs(TAB_SIZE))
 
-        #elif tok.type == Token.Name:
+        # elif tok.type == Token.Name:
         #    print(tok.text)
 
-        #print(ttype, type(ttype))
-        #print((ttype, value))
+        # print(ttype, type(ttype))
+        # print((ttype, value))
 
-    #for ttype, value in la:
+    # for ttype, value in la:
     #    #print(value, end="")
 
 
 def scan_source_filepath(filepath, args):
     # for quick tests
-    #~ if not filepath.endswith("creator.c"):
-    #~     return
+    # ~ if not filepath.endswith("creator.c"):
+    # ~     return
 
     code = open(filepath, 'r', encoding="utf-8").read()
 
@@ -1373,6 +1374,9 @@ def scan_source_recursive(dirpath, args):
                     yield filepath
 
     def is_source(filename):
+        # skip temp files
+        if filename.startswith("."):
+            return False
         ext = splitext(filename)[1]
         return (ext in {".c", ".inl", ".cpp", ".cxx", ".cc", ".hpp", ".hxx", ".h", ".hh", ".osl"})
 
@@ -1385,21 +1389,21 @@ def scan_source_recursive(dirpath, args):
 
 def create_parser():
     parser = argparse.ArgumentParser(
-            description=(
+        description=(
             "Check C/C++ code for conformance with blenders style guide:\n"
             "http://wiki.blender.org/index.php/Dev:Doc/CodeStyle)")
-            )
+    )
     parser.add_argument(
-            "paths",
-            nargs='+',
-            help="list of files or directories to check",
-            )
+        "paths",
+        nargs='+',
+        help="list of files or directories to check",
+    )
     parser.add_argument(
-            "-l",
-            "--no-length-check",
-            action="store_true",
-            help="skip warnings for long lines",
-            )
+        "-l",
+        "--no-length-check",
+        action="store_true",
+        help="skip warnings for long lines",
+    )
     return parser
 
 
@@ -1417,7 +1421,8 @@ def main(argv=None):
     print("Scanning:", SOURCE_DIR)
 
     if 0:
-        # SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
+        # SOURCE_DIR = os.path.normpath(
+        #     os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
         # scan_source_recursive(os.path.join(SOURCE_DIR, "source", "blender", "bmesh"))
         scan_source_recursive(os.path.join(SOURCE_DIR, "source/blender/makesrna/intern"), args)
         sys.exit(0)
diff --git a/check_source/check_style_c_config.py b/check_source/check_style_c_config.py
index 617436421c597eba591d929ec46e186b252266e0..8a9a3f659fdcb1316b2e97bdf1e8c3be1bf84f52 100644
--- a/check_source/check_style_c_config.py
+++ b/check_source/check_style_c_config.py
@@ -25,16 +25,15 @@ IGNORE = (
     "source/blender/blenloader/intern/readfile.c",
     "source/blender/blenloader/intern/versioning_250.c",
     "source/blender/blenloader/intern/versioning_legacy.c",
-    "source/blender/blenloader/intern/writefile.c",
 
     "source/blender/editors/space_logic/logic_buttons.c",
     "source/blender/editors/space_logic/logic_window.c",
-    
+
     "source/blender/imbuf/intern/dds/DirectDrawSurface.cpp",
 
     "source/blender/opencl/intern/clew.c",
     "source/blender/opencl/intern/clew.h",
-    )
+)
 
 IGNORE_DIR = (
     "source/blender/collada",
@@ -42,7 +41,8 @@ IGNORE_DIR = (
     "source/blender/editors/physics",
     "source/blender/editors/space_logic",
     "source/blender/freestyle",
-    )
+)
 
 
-SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
+SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(
+    os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
diff --git a/tests/check_source/check_style_c_test.py b/tests/check_source/check_style_c_test.py
index c166038cbdd042860f376989223d1905e9f421af..631ad143f9847d825d444729a1f920f84c21d90f 100755
--- a/tests/check_source/check_style_c_test.py
+++ b/tests/check_source/check_style_c_test.py
@@ -6,10 +6,10 @@
 import os
 import sys
 sys.path.append(
-        os.path.join(
+    os.path.join(
         os.path.dirname(__file__),
         "..", "..", "check_source",
-        ))
+    ))
 # ----
 
 import unittest
@@ -363,7 +363,7 @@ void func(void)
              "a = -(int)b + 1;"),
             ("a = 1+ (int *)*b;",
              "a = 1 + (int *)*b;"),
-            )
+        )
 
         for expr_fail, expr_ok in ab_test:
             code = FUNC_BEGIN + expr_fail + FUNC_END
@@ -378,6 +378,7 @@ void func(void)
 class SourceCodeTestComplete(unittest.TestCase):
     """ Check we ran all tests.
     """
+
     def _test_complete(self):
         # --------------------------------------------------------------------
         # Check we test all errors
diff --git a/utils/blend2json.py b/utils/blend2json.py
index 976c647ff772094eb035ac796957c5fc526e2dde..ba08d65783d8a03ed3ea1eda7758295b083ec343 100755
--- a/utils/blend2json.py
+++ b/utils/blend2json.py
@@ -32,20 +32,24 @@ WARNING! This is still WIP tool!
 
 Example usage:
 
-   ./blend2json.py -i foo.blend
+   ./blend2json.py foo.blend
+
+To output also all 'name' fields from data:
+
+   ./blend2json.py --filter-data="name" foo.blend
 
 To output complete DNA struct info:
 
-   ./blend2json.py --full-dna -i foo.blend
+   ./blend2json.py --full-dna foo.blend
 
 To avoid getting all 'uid' old addresses (those will change really often even when data itself does not change,
 making diff pretty noisy):
 
-   ./blend2json.py --no-old-addresses -i foo.blend
+   ./blend2json.py --no-old-addresses foo.blend
 
 To check a .blend file instead of outputting its JSon version (use explicit -o option to do both at the same time):
 
-   ./blend2json.py -c -i foo.blend
+   ./blend2json.py -c foo.blend
 
 """
 
@@ -75,20 +79,16 @@ To include only MESH or CURVE blocks and all data used by them:
 """
 
 import os
-import struct
-import logging
-import gzip
-import tempfile
 import json
 import re
 
 # Avoid maintaining multiple blendfile modules
 import sys
 sys.path.append(os.path.join(
-        os.path.dirname(__file__),
-        "..", "..", "..",
-        "release", "scripts", "addons", "io_blend_utils", "blend",
-        ))
+    os.path.dirname(__file__),
+    "..", "..", "..",
+    "release", "scripts", "addons", "io_blend_utils", "blend",
+))
 del sys
 
 import blendfile
@@ -128,7 +128,7 @@ def list_to_json(lst, indent, indent_step, compact_output=False):
                 ((',\n%s%s' % (indent, indent_step)).join(
                     ('\n%s%s%s' % (indent, indent_step, l) if (i == 0 and l[0] in {'[', '{'}) else l)
                     for i, l in enumerate(lst))
-                ) +
+                 ) +
                 '\n%s]' % indent)
 
 
@@ -235,6 +235,7 @@ def do_bblock_filter(filters, blend, block, meta_keyval, data_keyval):
 def bblocks_to_json(args, fw, blend, address_map, indent, indent_step):
     no_address = args.no_address
     full_data = args.full_data
+    filter_data = args.filter_data
 
     def gen_meta_keyval(blend, block):
         keyval = [
@@ -249,9 +250,12 @@ def bblocks_to_json(args, fw, blend, address_map, indent, indent_step):
         ]
         return keyval
 
-    def gen_data_keyval(blend, block):
+    def gen_data_keyval(blend, block, key_filter=None):
         def _is_pointer(k):
             return blend.structs[block.sdna_index].field_from_path(blend.header, blend.handle, k).dna_name.is_pointer
+        if key_filter is not None:
+            return [(json_dumps(k)[1:-1], json_dumps(address_map.get(v, v) if _is_pointer(k) else v))
+                    for k, v in block.items_recursive_iter() if k in key_filter]
         return [(json_dumps(k)[1:-1], json_dumps(address_map.get(v, v) if _is_pointer(k) else v))
                 for k, v in block.items_recursive_iter()]
 
@@ -271,6 +275,9 @@ def bblocks_to_json(args, fw, blend, address_map, indent, indent_step):
             if full_data:
                 meta_keyval.append(("data", keyval_to_json(gen_data_keyval(blend, block),
                                                            indent + indent_step, indent_step, args.compact_output)))
+            elif filter_data:
+                meta_keyval.append(("data", keyval_to_json(gen_data_keyval(blend, block, filter_data),
+                                                           indent + indent_step, indent_step, args.compact_output)))
             keyval = keyval_to_json(meta_keyval, indent, indent_step, args.compact_output)
             fw('%s%s%s' % ('' if is_first else ',\n', indent, keyval))
             is_first = False
@@ -354,32 +361,46 @@ def argparse_create():
     parser = argparse.ArgumentParser(description=usage_text, epilog=epilog,
                                      formatter_class=argparse.RawDescriptionHelpFormatter)
 
-    parser.add_argument(dest="input", nargs="+", metavar='PATH',
-            help="Input .blend file(s)")
-    parser.add_argument("-o", "--output", dest="output", action="append", metavar='PATH', required=False,
-            help="Output .json file(s) (same path/name as input file(s) if not specified)")
-    parser.add_argument("-c", "--check-file", dest="check_file", default=False, action='store_true', required=False,
-            help=("Perform some basic validation checks over the .blend file"))
-    parser.add_argument("--compact-output", dest="compact_output", default=False, action='store_true', required=False,
-            help=("Output a very compact representation of blendfile (one line per block/DNAStruct)"))
-    parser.add_argument("--no-old-addresses", dest="no_address", default=False, action='store_true', required=False,
-            help=("Do not output old memory address of each block of data "
-                  "(used as 'uuid' in .blend files, but change pretty noisily)"))
-    parser.add_argument("--no-fake-old-addresses", dest="use_fake_address", default=True, action='store_false',
-            required=False,
-            help=("Do not 'rewrite' old memory address of each block of data "
-                  "(they are rewritten by default to some hash of their content, "
-                  "to try to avoid too much diff noise between different but similar files)"))
-    parser.add_argument("--full-data", dest="full_data",
-            default=False, action='store_true', required=False,
-            help=("Also put in JSon file data itself "
-                  "(WARNING! will generate *huge* verbose files - and is far from complete yet)"))
-    parser.add_argument("--full-dna", dest="full_dna", default=False, action='store_true', required=False,
-            help=("Also put in JSon file dna properties description (ignored when --compact-output is used)"))
+    parser.add_argument(
+        dest="input", nargs="+", metavar='PATH',
+        help="Input .blend file(s)")
+    parser.add_argument(
+        "-o", "--output", dest="output", action="append", metavar='PATH', required=False,
+        help="Output .json file(s) (same path/name as input file(s) if not specified)")
+    parser.add_argument(
+        "-c", "--check-file", dest="check_file", default=False, action='store_true', required=False,
+        help=("Perform some basic validation checks over the .blend file"))
+    parser.add_argument(
+        "--compact-output", dest="compact_output", default=False, action='store_true', required=False,
+        help=("Output a very compact representation of blendfile (one line per block/DNAStruct)"))
+    parser.add_argument(
+        "--no-old-addresses", dest="no_address", default=False, action='store_true', required=False,
+        help=("Do not output old memory address of each block of data "
+              "(used as 'uuid' in .blend files, but change pretty noisily)"))
+    parser.add_argument(
+        "--no-fake-old-addresses", dest="use_fake_address", default=True, action='store_false',
+        required=False,
+        help=("Do not 'rewrite' old memory address of each block of data "
+              "(they are rewritten by default to some hash of their content, "
+              "to try to avoid too much diff noise between different but similar files)"))
+    parser.add_argument(
+        "--full-data", dest="full_data",
+        default=False, action='store_true', required=False,
+        help=("Also put in JSon file data itself "
+              "(WARNING! will generate *huge* verbose files - and is far from complete yet)"))
+    parser.add_argument(
+        "--filter-data", dest="filter_data",
+        default=None, required=False,
+        help=("Only put in JSon file data fields which names match given comma-separated list "
+              "(ignored if --full-data is set)"))
+    parser.add_argument(
+        "--full-dna", dest="full_dna", default=False, action='store_true', required=False,
+        help=("Also put in JSon file dna properties description (ignored when --compact-output is used)"))
 
     group = parser.add_argument_group("Filters", FILTER_DOC)
-    group.add_argument("--filter-block", dest="block_filters", nargs=3, action='append',
-            help=("Filter to apply to BLOCKS (a.k.a. data itself)"))
+    group.add_argument(
+        "--filter-block", dest="block_filters", nargs=3, action='append',
+        help=("Filter to apply to BLOCKS (a.k.a. data itself)"))
 
     return parser
 
@@ -402,6 +423,12 @@ def main():
                                re.compile(f), re.compile(d))
                               for m, f, d in args.block_filters]
 
+    if args.filter_data:
+        if args.full_data:
+            args.filter_data = None
+        else:
+            args.filter_data = {n.encode() for n in args.filter_data.split(',')}
+
     for infile, outfile in zip(args.input, args.output):
         with blendfile.open_blend(infile) as blend:
             address_map = gen_fake_addresses(args, blend)
diff --git a/utils/blender_update_themes.py b/utils/blender_update_themes.py
index f9a4545db64319f17ef0624681e4b7fc1f33d7ce..d8b483ccdc2c7055945c34cefeb35ba25d8749af 100644
--- a/utils/blender_update_themes.py
+++ b/utils/blender_update_themes.py
@@ -34,7 +34,7 @@ def update(filepath):
     preset_xml_map = (
         ("user_preferences.themes[0]", "Theme"),
         ("user_preferences.ui_styles[0]", "Theme"),
-        )
+    )
     rna_xml.xml_file_run(context,
                          filepath,
                          preset_xml_map)
diff --git a/utils/credits_git_gen.py b/utils/credits_git_gen.py
index f1049a89bec8fe3e9a7110273419b73985df8b53..3fcf704f6f9714f2eab9ff2ec1c646874e80133f 100755
--- a/utils/credits_git_gen.py
+++ b/utils/credits_git_gen.py
@@ -37,7 +37,7 @@ class CreditUser:
         "commit_total",
         "year_min",
         "year_max",
-        )
+    )
 
     def __init__(self):
         self.commit_total = 0
@@ -46,7 +46,7 @@ class CreditUser:
 class Credits:
     __slots__ = (
         "users",
-        )
+    )
 
     def __init__(self):
         self.users = {}
@@ -115,12 +115,14 @@ def argparse_create():
 
     parser = argparse.ArgumentParser(description=usage_text, epilog=epilog)
 
-    parser.add_argument("--source", dest="source_dir",
-            metavar='PATH', required=True,
-            help="Path to git repository")
-    parser.add_argument("--range", dest="range_sha1",
-            metavar='SHA1_RANGE', required=True,
-            help="Range to use, eg: 169c95b8..HEAD")
+    parser.add_argument(
+        "--source", dest="source_dir",
+        metavar='PATH', required=True,
+        help="Path to git repository")
+    parser.add_argument(
+        "--range", dest="range_sha1",
+                        metavar='SHA1_RANGE', required=True,
+                        help="Range to use, eg: 169c95b8..HEAD")
 
     return parser
 
@@ -137,7 +139,7 @@ def main():
             b"blender/extern/",
             b"blender/intern/opennl/",
             b"blender/intern/moto/",
-            )
+        )
 
         if not any(f for f in c.files if not f.startswith(ignore_dir)):
             return False
@@ -151,7 +153,7 @@ def main():
         "<b>BioSkill GmbH</b> - H3D compatibility for X3D Exporter, "
         "OBJ Nurbs Import/Export",
         "<b>AutoCRC</b> - Improvements to fluid particles, vertex color baking",
-        )
+    )
 
     credits = Credits()
     # commit_range = "HEAD~10..HEAD"
@@ -159,8 +161,8 @@ def main():
     citer = GitCommitIter(args.source_dir, commit_range)
     credits.process((c for c in citer if is_credit_commit_valid(c)))
     credits.write("credits.html",
-        is_main_credits=True,
-        contrib_companies=contrib_companies)
+                  is_main_credits=True,
+                  contrib_companies=contrib_companies)
     print("Written: credits.html")
 
 
diff --git a/utils/cycles_commits_sync.py b/utils/cycles_commits_sync.py
new file mode 100755
index 0000000000000000000000000000000000000000..c0dfb3864bf0b299b1a059b7a45b9748806257c8
--- /dev/null
+++ b/utils/cycles_commits_sync.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+
+import collections
+import os
+import subprocess
+import sys
+import tempfile
+
+# Hashes to be ignored
+#
+# The system sometimes fails to match commits and suggests to backport
+# revision which was already ported. In order to solve that we can:
+#
+# - Explicitly ignore some of the commits.
+# - Move the synchronization point forward.
+IGNORE_HASHES = {
+    }
+
+# Start revisions from both repositories.
+CYCLES_START_COMMIT=b"49df7712bae023da3318ba03e065cb8513a8d5d3"
+BLENDER_START_COMMIT=b"ae76a90593fb03187789e29676f589adfd5294ea"
+
+# Prefix which is common for all the subjects.
+GIT_SUBJECT_COMMON_PREFIX = b"Subject: [PATCH] "
+
+# Marker which indicates begin of new file in the patch set.
+GIT_FILE_SECTION_MARKER = b"diff --git"
+
+# Marker of the end of the patchset.
+GIT_PATCHSET_END_MARKER = b"-- "
+
+# Prefix of topic to be omitted
+SUBJECT_SKIP_PREFIX = (
+    b"Cycles: ",
+    b"cycles: ",
+    b"Cycles Standalone: ",
+    b"Cycles standalone: ",
+    b"cycles standalone: ",
+    )
+
+
+def subject_strip(common_prefix, subject):
+    for prefix in SUBJECT_SKIP_PREFIX:
+        full_prefix = common_prefix + prefix
+        if subject.startswith(full_prefix):
+            subject = common_prefix + subject[len(full_prefix):]
+            break
+    return subject
+
+
+def replace_file_prefix(path, prefix, replace_prefix):
+    tokens = path.split(b' ')
+    prefix_len = len(prefix)
+    for i, t in enumerate(tokens):
+        for x in (b"a/", b"b/"):
+            if t.startswith(x + prefix):
+                tokens[i] = x + replace_prefix + t[prefix_len + 2:]
+    return b' '.join(tokens)
+
+
+def cleanup_patch(patch, accept_prefix, replace_prefix):
+    assert(accept_prefix[0] != b'/')
+    assert(replace_prefix[0] != b'/')
+
+    full_accept_prefix = GIT_FILE_SECTION_MARKER + b" a/" + accept_prefix
+
+    with open(patch, "rb") as f:
+        content = f.readlines()
+
+    clean_content = []
+    do_skip = False
+    for line in content:
+        if line.startswith(GIT_SUBJECT_COMMON_PREFIX):
+            # Skip possible prefix like "Cycles:", we already know change is
+            # about Cycles since it's being committed to a Cycles repository.
+            line = subject_strip(GIT_SUBJECT_COMMON_PREFIX, line)
+
+            # Dots usually are omitted in the topic
+            line = line.replace(b".\n", b"\n")
+        elif line.startswith(GIT_FILE_SECTION_MARKER):
+            if not line.startswith(full_accept_prefix):
+                do_skip = True
+            else:
+                do_skip = False
+                line = replace_file_prefix(line, accept_prefix, replace_prefix)
+        elif line.startswith(GIT_PATCHSET_END_MARKER):
+            do_skip = False
+        elif line.startswith(b"---") or line.startswith(b"+++"):
+                line = replace_file_prefix(line, accept_prefix, replace_prefix)
+
+        if not do_skip:
+            clean_content.append(line)
+
+    with open(patch, "wb") as f:
+        f.writelines(clean_content)
+
+
+# Get mapping from commit subject to commit hash.
+#
+# It'll actually include timestamp of the commit to the map key, so commits with
+# the same subject wouldn't conflict with each other.
+def commit_map_get(repository, path, start_commit):
+    command = (b"git",
+               b"--git-dir=" + os.path.join(repository, b'.git'),
+               b"--work-tree=" + repository,
+               b"log", b"--format=%H %at %s", b"--reverse",
+               start_commit + b'..HEAD',
+               os.path.join(repository, path))
+    lines = subprocess.check_output(command).split(b"\n")
+    commit_map = collections.OrderedDict()
+    for line in lines:
+        if line:
+            commit_sha, stamped_subject = line.split(b' ', 1)
+            stamp, subject = stamped_subject.split(b' ', 1)
+            subject = subject_strip(b"", subject).rstrip(b".")
+            stamped_subject = stamp + b" " + subject
+
+            if commit_sha in IGNORE_HASHES:
+                continue
+            commit_map[stamped_subject] = commit_sha
+    return commit_map
+
+
+# Get difference between two lists of commits.
+# Returns two lists: first are the commits to be ported from Cycles to Blender,
+# second one are the commits to be ported from Blender to Cycles.
+def commits_get_difference(cycles_map, blender_map):
+    cycles_to_blender = []
+    for stamped_subject, commit_hash in cycles_map.items():
+        if not stamped_subject in blender_map:
+            cycles_to_blender.append(commit_hash)
+
+    blender_to_cycles = []
+    for stamped_subject, commit_hash in blender_map.items():
+        if not stamped_subject in cycles_map:
+            blender_to_cycles.append(commit_hash)
+
+    return cycles_to_blender, blender_to_cycles
+
+
+# Transfer commits from one repository to another.
+# Doesn't do actual commit just for the safety.
+def transfer_commits(commit_hashes,
+                     from_repository,
+                     to_repository,
+                     dst_is_cycles):
+    patch_index = 1
+    for commit_hash in commit_hashes:
+        command = (
+            b"git",
+            b"--git-dir=" + os.path.join(from_repository, b'.git'),
+            b"--work-tree=" + from_repository,
+            b"format-patch", b"-1",
+            b"--start-number", str(patch_index),
+            b"-o", to_repository,
+            commit_hash,
+            )
+        patch_file = subprocess.check_output(command).rstrip(b"\n")
+        if dst_is_cycles:
+            cleanup_patch(patch_file, b"intern/cycles", b"src")
+        else:
+            cleanup_patch(patch_file, b"src", b"intern/cycles")
+        patch_index += 1
+
+
+def main():
+    if len(sys.argv) != 3:
+        print("Usage: %s /path/to/cycles/ /path/to/blender/" % sys.argv[0])
+        return
+
+    cycles_repository = sys.argv[1].encode()
+    blender_repository = sys.argv[2].encode()
+
+    cycles_map = commit_map_get(cycles_repository, b'', CYCLES_START_COMMIT)
+    blender_map = commit_map_get(blender_repository, b"intern/cycles", BLENDER_START_COMMIT)
+    diff = commits_get_difference(cycles_map, blender_map)
+
+    transfer_commits(diff[0], cycles_repository, blender_repository, False)
+    transfer_commits(diff[1], blender_repository, cycles_repository, True)
+
+    print("Missing commits were saved to the blender and cycles repositories.")
+    print("Check them and if they're all fine run:")
+    print("")
+    print("  git am *.patch")
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/cycles_timeit.py b/utils/cycles_timeit.py
new file mode 100755
index 0000000000000000000000000000000000000000..14ca484d08aad608bf76bf6f164374e322e7d2ff
--- /dev/null
+++ b/utils/cycles_timeit.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+
+import argparse
+import re
+import shutil
+import subprocess
+import sys
+import time
+
+class COLORS:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+VERBOSE = False
+
+#########################################
+# Generic helper functions.
+
+def logVerbose(*args):
+    if VERBOSE:
+        print(*args)
+
+
+def logHeader(*args):
+    print(COLORS.HEADER + COLORS.BOLD, end="")
+    print(*args, end="")
+    print(COLORS.ENDC)
+
+
+def logWarning(*args):
+    print(COLORS.WARNING + COLORS.BOLD, end="")
+    print(*args, end="")
+    print(COLORS.ENDC)
+
+
+def logOk(*args):
+    print(COLORS.OKGREEN + COLORS.BOLD, end="")
+    print(*args, end="")
+    print(COLORS.ENDC)
+
+
+def progress(count, total, prefix="", suffix=""):
+    if VERBOSE:
+        return
+
+    size = shutil.get_terminal_size((80, 20))
+
+    if prefix != "":
+        prefix = prefix + "    "
+    if suffix != "":
+        suffix = "    " + suffix
+
+    bar_len = size.columns - len(prefix) - len(suffix) - 10
+    filled_len = int(round(bar_len * count / float(total)))
+
+    percents = round(100.0 * count / float(total), 1)
+    bar = '=' * filled_len + '-' * (bar_len - filled_len)
+
+    sys.stdout.write('%s[%s] %s%%%s\r' % (prefix, bar, percents, suffix))
+    sys.stdout.flush()
+
+
+def progressClear():
+    if VERBOSE:
+        return
+
+    size = shutil.get_terminal_size((80, 20))
+    sys.stdout.write(" " * size.columns + "\r")
+    sys.stdout.flush()
+
+
+def humanReadableTimeDifference(seconds):
+    hours = int(seconds) // 60 // 60
+    seconds = seconds - hours * 60 * 60
+    minutes = int(seconds) // 60
+    seconds = seconds - minutes * 60
+    if hours == 0:
+        return "%02d:%05.2f" % (minutes, seconds)
+    else:
+        return "%02d:%02d:%05.2f" % (hours, minutes, seconds)
+
+
+def humanReadableTimeToSeconds(time):
+    tokens = time.split(".")
+    result = 0
+    if len(tokens) == 2:
+        result = float("0." + tokens[1])
+    mult = 1
+    for token in reversed(tokens[0].split(":")):
+        result += int(token) * mult
+        mult *= 60
+    return result
+
+#########################################
+# Benchmark specific helper functions.
+
+def configureArgumentParser():
+    parser = argparse.ArgumentParser(
+        description="Cycles benchmark helper script.")
+    parser.add_argument("-b", "--binary",
+                        help="Full file path to Blender's binary " +
+                             "to use for rendering",
+                        default="blender")
+    parser.add_argument("-f", "--files", nargs='+')
+    parser.add_argument("-v", "--verbose",
+                        help="Perform fully verbose communication",
+                        action="store_true",
+                        default=False)
+    return parser
+
+
+def benchmarkFile(blender, blendfile, stats):
+    logHeader("Begin benchmark of file {}" . format(blendfile))
+    # Prepare some regex for parsing
+    re_path_tracing = re.compile(".*Path Tracing Tile ([0-9]+)/([0-9]+)$")
+    re_total_render_time = re.compile(".*Total render time: ([0-9]+(\.[0-9]+)?)")
+    re_render_time_no_sync = re.compile(
+        ".*Render time \(without synchronization\): ([0-9]+(\.[0-9]+)?)")
+    re_pipeline_time = re.compile("Time: ([0-9:\.]+) \(Saving: ([0-9:\.]+)\)")
+    # Prepare output folder.
+    # TODO(sergey): Use some proper output folder.
+    output_folder = "/tmp/"
+    # Configure command for the current file.
+    command = (blender,
+               "--background",
+               "-noaudio",
+               "--factory-startup",
+               blendfile,
+               "--engine", "CYCLES",
+               "--debug-cycles",
+               "--render-output", output_folder,
+               "--render-format", "PNG",
+               "-f", "1")
+    # Run Blender with configured command line.
+    logVerbose("About to execute command: {}" . format(command))
+    start_time = time.time()
+    process = subprocess.Popen(command,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    # Keep reading status while Blender is alive.
+    total_render_time = "N/A"
+    render_time_no_sync = "N/A"
+    pipeline_render_time = "N/A"
+    while True:
+        line = process.stdout.readline()
+        if line == b"" and process.poll() != None:
+            break
+        line = line.decode().strip()
+        if line == "":
+            continue
+        logVerbose("Line from stdout: {}" . format(line))
+        match = re_path_tracing.match(line)
+        if match:
+            current_tiles = int(match.group(1))
+            total_tiles = int(match.group(2))
+            elapsed_time = time.time() - start_time
+            elapsed_time_str = humanReadableTimeDifference(elapsed_time)
+            progress(current_tiles,
+                     total_tiles,
+                     prefix="Path Tracing Tiles {}" . format(elapsed_time_str))
+        match = re_total_render_time.match(line)
+        if match:
+            total_render_time = float(match.group(1))
+        match = re_render_time_no_sync.match(line)
+        if match:
+            render_time_no_sync = float(match.group(1))
+        match = re_pipeline_time.match(line)
+        if match:
+            pipeline_render_time = humanReadableTimeToSeconds(match.group(1))
+
+    if process.returncode != 0:
+        return False
+
+    # Clear line used by progress.
+    progressClear()
+    print("Total pipeline render time: {} ({} sec)"
+          . format(humanReadableTimeDifference(pipeline_render_time),
+                   pipeline_render_time))
+    print("Total Cycles render time: {} ({} sec)"
+          . format(humanReadableTimeDifference(total_render_time),
+                   total_render_time))
+    print("Pure Cycles render time (without sync): {} ({} sec)"
+          . format(humanReadableTimeDifference(render_time_no_sync),
+                   render_time_no_sync))
+    logOk("Successfully rendered")
+    stats[blendfile] = {'PIPELINE_TOTAL': pipeline_render_time,
+                        'CYCLES_TOTAL': total_render_time,
+                        'CYCLES_NO_SYNC': render_time_no_sync}
+    return True
+
+
+def benchmarkAll(blender, files):
+    stats = {}
+    for blendfile in files:
+        try:
+            benchmarkFile(blender, blendfile, stats)
+        except KeyboardInterrupt:
+            print("")
+            logWarning("Rendering aborted!")
+            return
+
+
+def main():
+    parser = configureArgumentParser()
+    args = parser.parse_args()
+    if args.verbose:
+        global VERBOSE
+        VERBOSE = True
+    benchmarkAll(args.binary, args.files)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/git_log.py b/utils/git_log.py
index f738b5f12cca46d877a87ed7c3a604a727fa3854..dd2c91bc60c76bb475659e954a97754676046158 100644
--- a/utils/git_log.py
+++ b/utils/git_log.py
@@ -36,18 +36,18 @@ class GitCommit:
         "_body",
         "_files",
         "_files_status",
-        )
+    )
 
     def __init__(self, sha1, git_dir):
         self.sha1 = sha1
         self._git_dir = git_dir
 
         self._author = \
-        self._date = \
-        self._body = \
-        self._files = \
-        self._files_status = \
-        None
+            self._date = \
+            self._body = \
+            self._files = \
+            self._files_status = \
+            None
 
     def cache(self):
         """ Cache all properties
@@ -68,13 +68,13 @@ class GitCommit:
             "-1",  # only this rev
             self.sha1,
             "--format=" + format,
-            ) + args
+        ) + args
         # print(" ".join(cmd))
 
         p = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
-            )
+        )
         return p.stdout.read()
 
     @property
@@ -86,11 +86,11 @@ class GitCommit:
             "rev-parse",
             "--short",
             self.sha1,
-            )
+        )
         p = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
-            )
+        )
         return p.stdout.read().strip().decode('ascii')
 
     @property
@@ -147,7 +147,7 @@ class GitCommitIter:
         "_git_dir",
         "_sha1_range",
         "_process",
-        )
+    )
 
     def __init__(self, path, sha1_range):
         self._path = path
@@ -163,13 +163,13 @@ class GitCommitIter:
             "log",
             self._sha1_range,
             "--format=%H",
-            )
+        )
         # print(" ".join(cmd))
 
         self._process = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
-            )
+        )
         return self
 
     def __next__(self):
@@ -199,11 +199,11 @@ class GitRepo:
             "rev-parse",
             "--abbrev-ref",
             "HEAD",
-            )
+        )
         # print(" ".join(cmd))
 
         p = subprocess.Popen(
             cmd,
             stdout=subprocess.PIPE,
-            )
+        )
         return p.stdout.read()
diff --git a/utils/git_log_review_commits.py b/utils/git_log_review_commits.py
index 6e3d3589b8e243c1ec853ce8404590c08738b011..03ea099e4df4a625f9cc63da05ff829dbab93074 100755
--- a/utils/git_log_review_commits.py
+++ b/utils/git_log_review_commits.py
@@ -37,6 +37,7 @@ class _Getch:
     Gets a single character from standard input.
     Does not echo to the screen.
     """
+
     def __init__(self):
         try:
             self.impl = _GetchWindows()
@@ -48,6 +49,7 @@ class _Getch:
 
 
 class _GetchUnix:
+
     def __init__(self):
         import tty
         import sys
@@ -67,6 +69,7 @@ class _GetchUnix:
 
 
 class _GetchWindows:
+
     def __init__(self):
         import msvcrt
 
@@ -142,18 +145,22 @@ def argparse_create():
 
     parser = argparse.ArgumentParser(description=usage_text, epilog=epilog)
 
-    parser.add_argument("--source", dest="source_dir",
-            metavar='PATH', required=True,
-            help="Path to git repository")
-    parser.add_argument("--range", dest="range_sha1",
-            metavar='SHA1_RANGE', required=True,
-            help="Range to use, eg: 169c95b8..HEAD")
-    parser.add_argument("--author", dest="author",
-            metavar='AUTHOR', type=str, required=False,
-            help=("Method to filter commits in ['BUGFIX', todo]"))
-    parser.add_argument("--filter", dest="filter_type",
-            metavar='FILTER', type=str, required=False,
-            help=("Method to filter commits in ['BUGFIX', todo]"))
+    parser.add_argument(
+        "--source", dest="source_dir",
+        metavar='PATH', required=True,
+        help="Path to git repository")
+    parser.add_argument(
+        "--range", dest="range_sha1",
+        metavar='SHA1_RANGE', required=True,
+        help="Range to use, eg: 169c95b8..HEAD")
+    parser.add_argument(
+        "--author", dest="author",
+        metavar='AUTHOR', type=str, required=False,
+        help=("Method to filter commits in ['BUGFIX', todo]"))
+    parser.add_argument(
+        "--filter", dest="filter_type",
+        metavar='FILTER', type=str, required=False,
+        help=("Method to filter commits in ['BUGFIX', todo]"))
 
     return parser
 
diff --git a/utils/git_log_review_commits_advanced.py b/utils/git_log_review_commits_advanced.py
index 7294b5e74d977ad7f103fbf9cf78e8e8d58c1cc4..7c256c2c77b11b847b29ce07d35465f2a1a9e500 100755
--- a/utils/git_log_review_commits_advanced.py
+++ b/utils/git_log_review_commits_advanced.py
@@ -58,10 +58,10 @@ IGNORE_END_LINE = "<!-- IGNORE_END -->"
 
 _cwd = os.getcwd()
 __doc__ = __doc__ + \
-          "\nRaw GIT revisions files:\n\t* Accepted: %s\n\t* Rejected: %s\n\n" \
-          "Basic pretty-printed accepted revisions: %s\n\nFull release notes wiki page: %s\n" \
-          % (os.path.join(_cwd, ACCEPT_FILE), os.path.join(_cwd, REJECT_FILE),
-             os.path.join(_cwd, ACCEPT_PRETTY_FILE), os.path.join(_cwd, ACCEPT_RELEASELOG_FILE))
+    "\nRaw GIT revisions files:\n\t* Accepted: %s\n\t* Rejected: %s\n\n" \
+    "Basic pretty-printed accepted revisions: %s\n\nFull release notes wiki page: %s\n" \
+    % (os.path.join(_cwd, ACCEPT_FILE), os.path.join(_cwd, REJECT_FILE),
+       os.path.join(_cwd, ACCEPT_PRETTY_FILE), os.path.join(_cwd, ACCEPT_RELEASELOG_FILE))
 del _cwd
 
 
@@ -70,6 +70,7 @@ class _Getch:
     Gets a single character from standard input.
     Does not echo to the screen.
     """
+
     def __init__(self):
         try:
             self.impl = _GetchWindows()
@@ -81,6 +82,7 @@ class _Getch:
 
 
 class _GetchUnix:
+
     def __init__(self):
         import tty
         import sys
@@ -100,6 +102,7 @@ class _GetchUnix:
 
 
 class _GetchWindows:
+
     def __init__(self):
         import msvcrt
 
@@ -150,7 +153,7 @@ BUGFIX_CATEGORIES = (
         "Grease Pencil",
         "Objects",
         "Dependency Graph",
-        ),
+    ),
     ),
 
     ("Data / Geometry", (
@@ -160,14 +163,14 @@ BUGFIX_CATEGORIES = (
         "Meta Editing",
         "Modifiers",
         "Material / Texture",
-        ),
+    ),
     ),
 
     ("Physics / Simulations / Sculpt / Paint", (
         "Particles",
         "Physics / Hair / Simulations",
         "Sculpting / Painting",
-        ),
+    ),
     ),
 
     ("Image / Video / Render", (
@@ -180,7 +183,7 @@ BUGFIX_CATEGORIES = (
         "Render: Cycles",
         "Render: Freestyle",
         "Sequencer",
-        ),
+    ),
     ),
 
     ("UI / Spaces / Transform", (
@@ -190,11 +193,11 @@ BUGFIX_CATEGORIES = (
         "Text Editor",
         "Transform",
         "User Interface",
-        ),
+    ),
     ),
 
     ("Game Engine", (
-        ),
+    ),
     ),
 
     ("System / Misc", (
@@ -204,7 +207,7 @@ BUGFIX_CATEGORIES = (
         "Other",
         "Python",
         "System",
-        ),
+    ),
     ),
 )
 
@@ -457,35 +460,44 @@ def argparse_create():
     parser = argparse.ArgumentParser(description=usage_text, epilog=epilog,
                                      formatter_class=argparse.RawDescriptionHelpFormatter)
 
-    parser.add_argument("--source", dest="source_dir",
-            metavar='PATH', required=True,
-            help="Path to git repository")
-    parser.add_argument("--range", dest="range_sha1",
-            metavar='SHA1_RANGE', required=False,
-            help="Range to use, eg: 169c95b8..HEAD")
-    parser.add_argument("--author", dest="author",
-            metavar='AUTHOR', type=str, required=False,
-            help=("Author(s) to filter commits ("))
-    parser.add_argument("--filter", dest="filter_type",
-            metavar='FILTER', type=str, required=False,
-            help=("Method to filter commits in ['BUGFIX', 'NOISE']"))
-    parser.add_argument("--accept-pretty", dest="accept_pretty",
-            default=False, action='store_true', required=False,
-            help=("Also output pretty-printed accepted commits (nearly ready for WIKI release notes)"))
-    parser.add_argument("--accept-releaselog", dest="accept_releaselog",
-            default=False, action='store_true', required=False,
-            help=("Also output accepted commits as a wiki release log page (adds sorting by categories)"))
-    parser.add_argument("--blender-rev", dest="blender_rev",
-            default=None, required=False,
-            help=("Blender revision (only used to generate release notes page)"))
-    parser.add_argument("--blender-rstate", dest="blender_rstate",
-            default="alpha", required=False,
-            help=("Blender release state (like alpha, beta, rc1, final, corr_a, corr_b, etc.), "
-                  "each revision will be tagged by given one"))
-    parser.add_argument("--blender-rstate-list", dest="blender_rstate_list",
-            default="", required=False, type=lambda s: s.split(","),
-            help=("Blender release state(s) to additionaly list in their own sections "
-                  "(e.g. pass 'RC2' to list fixes between RC1 and RC2, ie tagged as RC2, etc.)"))
+    parser.add_argument(
+        "--source", dest="source_dir",
+        metavar='PATH', required=True,
+        help="Path to git repository")
+    parser.add_argument(
+        "--range", dest="range_sha1",
+        metavar='SHA1_RANGE', required=False,
+        help="Range to use, eg: 169c95b8..HEAD")
+    parser.add_argument(
+        "--author", dest="author",
+        metavar='AUTHOR', type=str, required=False,
+        help=("Author(s) to filter commits ("))
+    parser.add_argument(
+        "--filter", dest="filter_type",
+        metavar='FILTER', type=str, required=False,
+        help=("Method to filter commits in ['BUGFIX', 'NOISE']"))
+    parser.add_argument(
+        "--accept-pretty", dest="accept_pretty",
+        default=False, action='store_true', required=False,
+        help=("Also output pretty-printed accepted commits (nearly ready for WIKI release notes)"))
+    parser.add_argument(
+        "--accept-releaselog", dest="accept_releaselog",
+        default=False, action='store_true', required=False,
+        help=("Also output accepted commits as a wiki release log page (adds sorting by categories)"))
+    parser.add_argument(
+        "--blender-rev", dest="blender_rev",
+        default=None, required=False,
+        help=("Blender revision (only used to generate release notes page)"))
+    parser.add_argument(
+        "--blender-rstate", dest="blender_rstate",
+        default="alpha", required=False,
+        help=("Blender release state (like alpha, beta, rc1, final, corr_a, corr_b, etc.), "
+              "each revision will be tagged by given one"))
+    parser.add_argument(
+        "--blender-rstate-list", dest="blender_rstate_list",
+        default="", required=False, type=lambda s: s.split(","),
+        help=("Blender release state(s) to additionaly list in their own sections "
+              "(e.g. pass 'RC2' to list fixes between RC1 and RC2, ie tagged as RC2, etc.)"))
 
     return parser
 
diff --git a/utils/make_cursor_gui.py b/utils/make_cursor_gui.py
index 523a186e35778afd891e2c5c2be1a3deddb72d96..ab51c1ee06222a3475cecff0d924d0c3b0f5483b 100755
--- a/utils/make_cursor_gui.py
+++ b/utils/make_cursor_gui.py
@@ -4,17 +4,17 @@
 # Oct. 30, 2003
 
 from tkinter import (
-        Button,
-        Canvas,
-        Checkbutton,
-        END,
-        Frame,
-        IntVar,
-        Label,
-        RIDGE,
-        Text,
-        Tk,
-        )
+    Button,
+    Canvas,
+    Checkbutton,
+    END,
+    Frame,
+    IntVar,
+    Label,
+    RIDGE,
+    Text,
+    Tk,
+)
 
 color = ("black", "white", "darkgreen", "gray")
 
@@ -110,7 +110,7 @@ class App:
                 self.state.append(2)
             oldstate = []
 
-        #Insert scaling here
+        # Insert scaling here
 
         self.updatescrn()
         self.prev.config(width=self.size + 1, height=self.size + 1)
diff --git a/utils/uncrustify.cfg b/utils/uncrustify.cfg
index ac7e39cbc84d98ae5149def3744f9d4ba8fc0e5c..a7b855fb6f1d1b3433270d80048f8b4297685992 100644
--- a/utils/uncrustify.cfg
+++ b/utils/uncrustify.cfg
@@ -1707,3 +1707,6 @@ set FOR BM_ITER_ELEM
 set FOR BM_ITER_ELEM_INDEX
 # bmesh_operator_api.h
 set FOR BMO_ITER
+# BKE_node.h
+set FOR FOREACH_NODETREE
+
diff --git a/utils_api/bpy_introspect_ui.py b/utils_api/bpy_introspect_ui.py
index 782346b56291562c3141b68009e3087d84f60ce8..933485c9d15d6043813cbb8cd6756f0a07710681 100644
--- a/utils_api/bpy_introspect_ui.py
+++ b/utils_api/bpy_introspect_ui.py
@@ -183,6 +183,7 @@ def NewAttr(attr, attr_single):
 
 
 class BaseFakeUI:
+
     def __init__(self):
         self.layout = NewAttr("self.layout", "layout")
 
@@ -200,6 +201,7 @@ class Header(BaseFakeUI):
 
 
 class Menu(BaseFakeUI):
+
     def draw_preset(self, context):
         pass
 
diff --git a/utils_build/cmake-flags b/utils_build/cmake-flags
index ab0955c63b3e13e1f7c7e3497f64b884d018955b..1c2af550c6981a1588046877b0b4ae82ba86f4c2 100755
--- a/utils_build/cmake-flags
+++ b/utils_build/cmake-flags
@@ -38,158 +38,158 @@ PRESETS = {
         "CMAKE_CXX_FLAGS": (("-fsanitize=address",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=address",), ()),
         "CMAKE_EXE_LINKER_FLAGS": (("-lasan",), ()),
-        },
+    },
     "sanitize_leak": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=leak",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=leak",), ()),
-        },
+    },
     "sanitize_undefined": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=undefined",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=undefined",), ()),
-        },
+    },
     "sanitize_thread": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=thread",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=thread",), ()),
-        },
+    },
 
     # GCC5
     "sanitize_float_divide_by_zero": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=float-divide-by-zero",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=float-divide-by-zero",), ()),
-        },
+    },
     "sanitize_float_cast_overflow": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=float-cast-overflow",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=float-cast-overflow",), ()),
-        },
+    },
     "sanitize_int_overflow": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=signed-integer-overflow",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=signed-integer-overflow",), ()),
-        },
+    },
     "sanitize_bool": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=bool",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=bool",), ()),
-        },
+    },
     "sanitize_enum": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=enum",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=enum",), ()),
-        },
+    },
     "sanitize_bounds": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=bounds",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=bounds",), ()),
-        },
+    },
     "sanitize_bounds_strict": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=bounds-strict",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=bounds-strict",), ()),
-        },
+    },
     "sanitize_vla_bounds": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=vla-bounds",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=vla-bounds",), ()),
-        },
+    },
     "sanitize_alignment": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=alignment",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=alignment",), ()),
-        },
+    },
     "sanitize_object_size": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=object-size",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=object-size",), ()),
-        },
+    },
     "sanitize_nonull_attribute": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=nonnull-attribute",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=nonnull-attribute",), ()),
-        },
+    },
     "sanitize_returns_nonull_attribute": {
         "CMAKE_CXX_FLAGS": (("-fsanitize=returns-nonnull-attribute",), ()),
         "CMAKE_C_FLAGS":   (("-fsanitize=returns-nonnull-attribute",), ()),
-        },
+    },
 
     "warn_all": {
         "CMAKE_CXX_FLAGS": (("-Wall",), ()),
         "CMAKE_C_FLAGS":   (("-Wall",), ()),
-        },
+    },
     "warn_extra": {
         "CMAKE_CXX_FLAGS": (("-Wextra",), ()),
         "CMAKE_C_FLAGS":   (("-Wextra",), ()),
-        },
+    },
     "warn_unused_macros": {
         "CMAKE_CXX_FLAGS": (("-Wunused-macros",), ()),
         "CMAKE_C_FLAGS":   (("-Wunused-macros",), ()),
-        },
+    },
     "warn_undefined_macros": {
         "CMAKE_CXX_FLAGS": (("-Wundef",), ()),
         "CMAKE_C_FLAGS":   (("-Wundef",), ()),
-        },
+    },
     "warn_unused_local_typedefs": {
         "CMAKE_CXX_FLAGS": (("-Wunused-local-typedefs",), ()),
         "CMAKE_C_FLAGS":   (("-Wunused-local-typedefs",), ()),
-        },
+    },
     "warn_pointer_sign": {
         "CMAKE_CXX_FLAGS": (("",), ()),
         "CMAKE_C_FLAGS":   (("-Wpointer-sign",), ()),
-        },
+    },
     "warn_sizeof_pointer_memaccess": {
         "CMAKE_CXX_FLAGS": (("-Wsizeof-pointer-memaccess",), ()),
         "CMAKE_C_FLAGS":   (("-Wsizeof-pointer-memaccess",), ()),
-        },
+    },
     "warn_no_null": {
         "CMAKE_CXX_FLAGS": (("-Wnonnull",), ()),
         "CMAKE_C_FLAGS":   (("-Wnonnull",), ()),
-        },
+    },
     "warn_init_self": {
         "CMAKE_CXX_FLAGS": (("-Winit-self",), ()),
         "CMAKE_C_FLAGS":   (("-Winit-self",), ()),
-        },
+    },
     "warn_format": {
         "CMAKE_CXX_FLAGS": (("-Wformat=2", "-Wno-format-nonliteral", "-Wno-format-y2k"), ()),
         "CMAKE_C_FLAGS":   (("-Wformat=2", "-Wno-format-nonliteral", "-Wno-format-y2k"), ()),
-        },
+    },
     "warn_format": {
         "CMAKE_CXX_FLAGS": (("-Wwrite-strings",), ()),
         "CMAKE_C_FLAGS":   (("-Wwrite-strings",), ()),
-        },
+    },
     "warn_logical_op": {
         "CMAKE_CXX_FLAGS": (("-Wlogical-op",), ()),
         "CMAKE_C_FLAGS":   (("-Wlogical-op",), ()),
-        },
+    },
     "warn_error": {
         "CMAKE_CXX_FLAGS": (("-Werror",), ()),
         "CMAKE_C_FLAGS":   (("-Werror",), ()),
-        },
+    },
     "warn_shadow": {
         "CMAKE_CXX_FLAGS": (("-Wshadow", "-Wno-error=shadow"), ()),
         "CMAKE_C_FLAGS":   (("-Wshadow", "-Wno-error=shadow"), ()),
-        },
+    },
     "warn_missing_include_dirs": {
         "CMAKE_CXX_FLAGS": (("-Wmissing-include-dirs",), ()),
         "CMAKE_C_FLAGS":   (("-Wmissing-include-dirs",), ()),
-        },
+    },
     "warn_double_promotion": {
         "CMAKE_CXX_FLAGS": (("-Wdouble-promotion",), ()),
         "CMAKE_C_FLAGS":   (("-Wdouble-promotion",), ()),
-        },
+    },
     "warn_declaration_after_statement": {
         "CMAKE_C_FLAGS":   (("-Wdeclaration-after-statement",), ()),
-        },
+    },
     "warn_zero_as_null_pointer_constant": {
         "CMAKE_CXX_FLAGS": (("-Wzero-as-null-pointer-constant",), ()),
-        },
+    },
     "show_color": {
         "CMAKE_C_FLAGS": (("-fdiagnostics-color=always",), ()),
         "CMAKE_CXX_FLAGS": (("-fdiagnostics-color=always",), ()),
-        },
+    },
 
     # Optimize
     "optimize_whole_program": {
         "CMAKE_CXX_FLAGS": (("-flto",), ()),
         "CMAKE_C_FLAGS":   (("-flto",), ()),
         "CMAKE_EXE_LINKER_FLAGS": (("-flto", "-fwhole-program",), ()),
-        },
+    },
 
     # Profile
     "profile_gprof": {
         "CMAKE_CXX_FLAGS": (("-pg",), ()),
         "CMAKE_C_FLAGS":   (("-pg",), ()),
         "CMAKE_EXE_LINKER_FLAGS": (("-pg",), ()),
-        },
+    },
 }
 
 # ----------------------------------------------------------------------------
diff --git a/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py b/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
index 2c6fdb30ecd2c0973c80c64772e36d37164aadf2..b5552d354735f5522fb26e6e2a9dd6362ee41d51 100755
--- a/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
+++ b/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
@@ -21,6 +21,7 @@ SOURCE_FILE = sys.argv[-1]
 # TODO, support other compilers
 COMPILER_ID = 'GCC'
 
+
 def find_arg(source, data):
     source_base = os.path.basename(source)
     for l in data:
@@ -46,10 +47,11 @@ def find_arg(source, data):
 
 def find_build_args_ninja(source):
     make_exe = "ninja"
-    process = subprocess.Popen([make_exe, "-t", "commands"],
-                                stdout=subprocess.PIPE,
-                                cwd=BUILD_DIR,
-                               )
+    process = subprocess.Popen(
+        [make_exe, "-t", "commands"],
+        stdout=subprocess.PIPE,
+        cwd=BUILD_DIR,
+    )
     while process.poll():
         time.sleep(1)
 
@@ -59,12 +61,14 @@ def find_build_args_ninja(source):
     data = out.decode("utf-8", errors="ignore").split("\n")
     return find_arg(source, data)
 
+
 def find_build_args_make(source):
     make_exe = "make"
-    process = subprocess.Popen([make_exe, "--always-make", "--dry-run", "--keep-going", "VERBOSE=1"],
-                                stdout=subprocess.PIPE,
-                                cwd=BUILD_DIR,
-                               )
+    process = subprocess.Popen(
+        [make_exe, "--always-make", "--dry-run", "--keep-going", "VERBOSE=1"],
+        stdout=subprocess.PIPE,
+        cwd=BUILD_DIR,
+    )
     while process.poll():
         time.sleep(1)
 
@@ -75,6 +79,7 @@ def find_build_args_make(source):
     data = out.decode("utf-8", errors="ignore").split("\n")
     return find_arg(source, data)
 
+
 def main():
     import re
 
@@ -106,7 +111,7 @@ def main():
     except ValueError:
         i = -1
     if i != -1:
-        del arg_split[:i + 1] 
+        del arg_split[:i + 1]
 
     if COMPILER_ID == 'GCC':
         # --- Switch debug for optimized ---
@@ -129,19 +134,17 @@ def main():
 
                 # asan flags
                 (re.compile(r"\-fsanitize=.*"), 1),
-                ):
+        ):
             if isinstance(arg, str):
                 # exact string compare
                 while arg in arg_split:
                     i = arg_split.index(arg)
-                    del arg_split[i : i + n]
+                    del arg_split[i: i + n]
             else:
                 # regex match
                 for i in reversed(range(len(arg_split))):
                     if arg.match(arg_split[i]):
-                        del arg_split[i : i + n]
-
-
+                        del arg_split[i: i + n]
 
         # add optimized args
         arg_split += ["-O3", "-fomit-frame-pointer", "-DNDEBUG", "-Wno-error"]
diff --git a/utils_ide/qtcreator/externaltools/qtc_blender_diffusion.py b/utils_ide/qtcreator/externaltools/qtc_blender_diffusion.py
index 915c949a030f39ec562218d6aab707e59585529f..26348506b37f3b9931cdcc49eb2e85fe2332e91d 100755
--- a/utils_ide/qtcreator/externaltools/qtc_blender_diffusion.py
+++ b/utils_ide/qtcreator/externaltools/qtc_blender_diffusion.py
@@ -17,14 +17,19 @@ SOURCE_ROW = sys.argv[-1]
 
 BASE_URL = "https://developer.blender.org/diffusion/B/browse"
 
+
 def main():
     dirname, filename = os.path.split(SOURCE_FILE)
 
-    process = subprocess.Popen(["git", "rev-parse", "--symbolic-full-name", "--abbrev-ref", "@{u}"], stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)
+    process = subprocess.Popen(
+        ["git", "rev-parse", "--symbolic-full-name", "--abbrev-ref",
+         "@{u}"], stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)
     output = process.communicate()[0]
     branchname = output.rstrip().rsplit('/', 1)[-1]
 
-    process = subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)
+    process = subprocess.Popen(
+        ["git", "rev-parse", "--show-toplevel"],
+        stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)
     output = process.communicate()[0]
     toplevel = output.rstrip()
     filepath = os.path.relpath(SOURCE_FILE, toplevel)
diff --git a/utils_ide/qtcreator/externaltools/qtc_doxy_file.py b/utils_ide/qtcreator/externaltools/qtc_doxy_file.py
index bca773c4aad48108921eb0b3aff30c0e9e7485d2..7e1fd80c13f4ccc0cdf405dc8b03e0e67fa19602 100755
--- a/utils_ide/qtcreator/externaltools/qtc_doxy_file.py
+++ b/utils_ide/qtcreator/externaltools/qtc_doxy_file.py
@@ -12,6 +12,7 @@ import os
 import subprocess
 import tempfile
 
+
 def find_gitroot(filepath_reference):
     path = filepath_reference
     path_prev = ""
diff --git a/utils_ide/qtcreator/externaltools/qtc_project_update.py b/utils_ide/qtcreator/externaltools/qtc_project_update.py
index 7ea5b8a4502193981cc1dd5c7af9adee7db142c1..d708f5aa2d6737ced20eb31ab935baff2c6e8f77 100755
--- a/utils_ide/qtcreator/externaltools/qtc_project_update.py
+++ b/utils_ide/qtcreator/externaltools/qtc_project_update.py
@@ -12,6 +12,7 @@ import os
 
 PROJECT_DIR = sys.argv[-1]
 
+
 def cmake_find_source(path):
     import re
     match = re.compile(r"^CMAKE_HOME_DIRECTORY\b")
@@ -28,8 +29,7 @@ cmd = (
     "python",
     os.path.join(SOURCE_DIR, "build_files/cmake/cmake_qtcreator_project.py"),
     PROJECT_DIR,
-    )
+)
 
 print(cmd)
 os.system(" ".join(cmd))
-
diff --git a/utils_ide/qtcreator/externaltools/qtc_select_surround.py b/utils_ide/qtcreator/externaltools/qtc_select_surround.py
index 2c2af5f6fa28f45a78ec86c07524effcc29ce09a..3d1dd2a984fe298f32f43c174885163151d2507a 100755
--- a/utils_ide/qtcreator/externaltools/qtc_select_surround.py
+++ b/utils_ide/qtcreator/externaltools/qtc_select_surround.py
@@ -7,4 +7,3 @@ txt = sys.stdin.read()
 print("(", end="")
 print(txt, end="")
 print(")", end="")
-
diff --git a/utils_ide/qtcreator/externaltools/qtc_sort_paths.py b/utils_ide/qtcreator/externaltools/qtc_sort_paths.py
index b2fee04f7d971d2d0c4f536b52cba1ee010f0ed4..f4d2609d91437adf0434cd9eaf7029d9655a435a 100755
--- a/utils_ide/qtcreator/externaltools/qtc_sort_paths.py
+++ b/utils_ide/qtcreator/externaltools/qtc_sort_paths.py
@@ -6,6 +6,7 @@ data = txt.split("\n")
 
 
 class PathCMP:
+
     def __init__(self, path):
         path = path.strip()