diff --git a/check_source/check_descriptions.py b/check_source/check_descriptions.py
index 13ef8715a972b97db02648bd9d79805b4dcd8370..327d2c989426b01c0aa9bfcd438fe5ceaa96be9a 100644
--- a/check_source/check_descriptions.py
+++ b/check_source/check_descriptions.py
@@ -52,19 +52,22 @@ DUPLICATE_WHITELIST = (
     ('ACTION_OT_select_less', 'GRAPH_OT_select_less'),
     ('ACTION_OT_select_linked', 'GRAPH_OT_select_linked'),
     ('ACTION_OT_select_more', 'GRAPH_OT_select_more'),
+    ('ACTION_OT_unlink', 'NLA_OT_action_unlink'),
     ('ACTION_OT_view_all', 'CLIP_OT_dopesheet_view_all', 'GRAPH_OT_view_all'),
-    ('ANIM_OT_change_frame', 'CLIP_OT_change_frame'),
-    ('ARMATURE_OT_armature_layers', 'POSE_OT_armature_layers'),
+    ('ACTION_OT_view_frame', 'GRAPH_OT_view_frame'),
+    ('ANIM_OT_change_frame', 'CLIP_OT_change_frame', 'IMAGE_OT_change_frame'),
     ('ARMATURE_OT_autoside_names', 'POSE_OT_autoside_names'),
     ('ARMATURE_OT_bone_layers', 'POSE_OT_bone_layers'),
     ('ARMATURE_OT_extrude_forked', 'ARMATURE_OT_extrude_move'),
+    ('ARMATURE_OT_flip_names', 'POSE_OT_flip_names'),
     ('ARMATURE_OT_select_all', 'POSE_OT_select_all'),
     ('ARMATURE_OT_select_hierarchy', 'POSE_OT_select_hierarchy'),
     ('ARMATURE_OT_select_linked', 'POSE_OT_select_linked'),
+    ('ARMATURE_OT_select_mirror', 'POSE_OT_select_mirror'),
     ('CLIP_OT_cursor_set', 'UV_OT_cursor_set'),
     ('CLIP_OT_disable_markers', 'CLIP_OT_graph_disable_markers'),
     ('CLIP_OT_graph_select_border', 'MASK_OT_select_border'),
-    ('CLIP_OT_view_ndof', 'IMAGE_OT_view_ndof'),
+    ('CLIP_OT_view_ndof', 'IMAGE_OT_view_ndof', 'VIEW2D_OT_ndof'),
     ('CLIP_OT_view_pan', 'IMAGE_OT_view_pan', 'VIEW2D_OT_pan', 'VIEW3D_OT_view_pan'),
     ('CLIP_OT_view_zoom', 'VIEW2D_OT_zoom'),
     ('CLIP_OT_view_zoom_in', 'VIEW2D_OT_zoom_in'),
@@ -73,19 +76,25 @@ DUPLICATE_WHITELIST = (
     ('CONSOLE_OT_delete', 'FONT_OT_delete', 'TEXT_OT_delete'),
     ('CONSOLE_OT_insert', 'FONT_OT_text_insert', 'TEXT_OT_insert'),
     ('CONSOLE_OT_paste', 'FONT_OT_text_paste', 'TEXT_OT_paste'),
-    ('CURVE_OT_duplicate', 'MASK_OT_duplicate'),
     ('CURVE_OT_handle_type_set', 'MASK_OT_handle_type_set'),
+    ('CURVE_OT_shortest_path_pick', 'MESH_OT_shortest_path_pick'),
     ('CURVE_OT_switch_direction', 'MASK_OT_switch_direction'),
     ('FONT_OT_line_break', 'TEXT_OT_line_break'),
     ('FONT_OT_move', 'TEXT_OT_move'),
     ('FONT_OT_move_select', 'TEXT_OT_move_select'),
+    ('FONT_OT_select_all', 'TEXT_OT_select_all'),
     ('FONT_OT_text_cut', 'TEXT_OT_cut'),
+    ('GRAPH_OT_previewrange_set', 'NLA_OT_previewrange_set'),
     ('GRAPH_OT_properties', 'IMAGE_OT_properties', 'LOGIC_OT_properties', 'NLA_OT_properties'),
+    ('IMAGE_OT_clear_render_border', 'VIEW3D_OT_clear_render_border'),
+    ('IMAGE_OT_render_border', 'VIEW3D_OT_render_border'),
+    ('IMAGE_OT_toolshelf', 'NODE_OT_toolbar', 'VIEW3D_OT_toolshelf'),
     ('LATTICE_OT_select_ungrouped', 'MESH_OT_select_ungrouped', 'PAINT_OT_vert_select_ungrouped'),
+    ('MESH_OT_extrude_region_move', 'MESH_OT_extrude_region_shrink_fatten'),
     ('NODE_OT_add_node', 'NODE_OT_add_search'),
     ('NODE_OT_move_detach_links', 'NODE_OT_move_detach_links_release'),
     ('NODE_OT_properties', 'VIEW3D_OT_properties'),
-    ('NODE_OT_toolbar', 'VIEW3D_OT_toolshelf'),
+    ('OBJECT_OT_bake', 'OBJECT_OT_bake_image'),
     ('OBJECT_OT_duplicate_move', 'OBJECT_OT_duplicate_move_linked'),
     ('WM_OT_context_cycle_enum', 'WM_OT_context_toggle', 'WM_OT_context_toggle_enum'),
     ('WM_OT_context_set_boolean', 'WM_OT_context_set_enum', 'WM_OT_context_set_float', 'WM_OT_context_set_int', 'WM_OT_context_set_string', 'WM_OT_context_set_value'),
@@ -107,7 +116,7 @@ def check_duplicates():
     # This is mainly useful for operators,
     # other types have too many false positives
 
-    #for t in (structs, funcs, ops, props):
+    # for t in (structs, funcs, ops, props):
     for t in (ops, ):
         description_dict = {}
         print("")
diff --git a/check_source/check_header_duplicate.py b/check_source/check_header_duplicate.py
index d5af3b71e8004d46133c4add0f4687fed9eadfd3..02217d5f70759f9c068ccecc3507ea98f92d9dba 100755
--- a/check_source/check_header_duplicate.py
+++ b/check_source/check_header_duplicate.py
@@ -87,7 +87,6 @@ def scan_source_recursive(dirpath, is_restore):
         ext = splitext(filename)[1]
         return (ext in {".hpp", ".hxx", ".h", ".hh"})
 
-
     def is_ignore(filename):
         pass
 
diff --git a/check_source/check_spelling.py b/check_source/check_spelling.py
index 54dd42356a2ee63ebf43831ae97bc73c8f0afc05..8a047c90c953b6ade37d2d35d494f1719b488c8a 100755
--- a/check_source/check_spelling.py
+++ b/check_source/check_spelling.py
@@ -290,8 +290,8 @@ def spell_check_comments(filepath):
 
     for comment in comment_list:
         for w in comment.parse():
-            #if len(w) < 15:
-            #    continue
+            # if len(w) < 15:
+            #     continue
 
             w_lower = w.lower()
             if w_lower in dict_custom or w_lower in dict_ignore:
diff --git a/check_source/check_spelling_c_config.py b/check_source/check_spelling_c_config.py
index 735a10dabb220f4c569d10ae77c2e1fe1314bdf9..844dc9e39257c623cc37932944318a00d2969476 100644
--- a/check_source/check_spelling_c_config.py
+++ b/check_source/check_spelling_c_config.py
@@ -22,125 +22,137 @@
 
 # correct spelling but ignore
 dict_custom = {
+    "adjoint", "adjugate",
+    "atomicity",
+    "boolean",
+    "decrement",
+    "decrementing",
+    "desaturate",
+    "enqueue",
+    "equiangular",
     "instantiation",
     "iterable",
-    "prepend",
-    "subclass", "subclasses", "subclassing",
     "merchantability",
+    "natively",
+    "parallelization",
+    "parallelized",
     "precalculate",
+    "prepend",
+    "probabilistically",
+    "recurse",
+    "subclass", "subclasses", "subclassing",
+    "subdirectory",
     "unregister",
     "unselected",
-    "subdirectory",
-    "decrement",
-    "boolean",
-    "decrementing",
     "variadic",
 
     # python types
-    "str",
     "enum", "enums",
     "int", "ints",
+    "str",
     "tuple", "tuples",
 
     # python functions
-    "repr",
     "func",
+    "repr",
 
     # accepted abbreviations
-    "config",
-    "recalc",
     "addon", "addons",
-    "subdir",
-    "struct", "structs",
-    "lookup", "lookups",
     "autocomplete",
-    "namespace",
-    "multi",
-    "keyframe", "keyframing",
+    "config",
     "coord", "coords",
     "dir",
-    "tooltip",
+    "keyframe", "keyframing",
+    "lookup", "lookups",
+    "multi",
     "multithreading",
+    "namespace",
+    "recalc",
+    "struct", "structs",
+    "subdir",
+    "tooltip",
 
     # general computer terms
-    "endian",
+    "XXX",
+    "app",
+    "autorepeat",
+    "blit", "blitting",
+    "boids",
+    "booleans",
+    "codepage",
     "contructor",
-    "unicode",
-    "jitter",
+    "decimator",
+    "diff",
+    "diffs",
+    "endian",
+    "env",
+    "euler", "eulers",
+    "foo",
+    "hashable",
+    "http",
+    "intrinsics",
+    "jitter", "jittering",
+    "keymap",
+    "lerp",
+    "metadata",
+    "opengl",
+    "preprocessor",
     "quantized",
     "searchable",
-    "metadata",
-    "hashable",
+    "segfault",
     "stdin",
-    "stdout",
     "stdin",
-    "opengl",
-    "boids",
-    "keymap",
-    "voxel", "voxels",
-    "vert", "verts",
-    "euler", "eulers",
-    "booleans",
-    "intrinsics",
-    "XXX",
-    "segfault",
-    "wiki",
-    "foo",
-    "diff",
-    "diffs",
+    "stdout",
     "sudo",
-    "http",
+    "touchpad", "touchpads",
+    "trackpad", "trackpads",
+    "unicode",
     "url",
     "usr",
-    "env",
-    "app",
-    "preprocessor",
-    "trackpad", "trackpads",
-    "touchpad", "touchpads",
-    "codepage",
-    "lerp",
-    "decimator",
-    "autorepeat",
+    "vert", "verts",
+    "voxel", "voxels",
+    "wiki",
 
     # specific computer terms/brands
-    "posix",
-    "unix",
+    "ack",
     "amiga",
-    "netscape",
-    "mozilla",
+    "bzflag",
+    "freebsd",
     "irix",
     "kde",
+    "mozilla",
+    "netscape",
+    "posix",
     "qtcreator",
-    "ack",
-    "bzflag",
+    "scons",
     "sdl",
+    "unix",
     "xinerama",
-    "scons",
 
     # general computer graphics terms
-    "colinear",
-    "coplanar",
+    "atomics",
     "barycentric",
     "bezier",
-    "fresnel",
-    "radiosity",
-    "reflectance",
-    "specular",
-    "nurbs",
-    "ngon", "ngons",
     "bicubic",
+    "centroid",
+    "colinear",
     "compositing",
+    "coplanar",
     "deinterlace",
-    "shader",
-    "shaders",
-    "centroid",
     "emissive",
-    "quaternions",
+    "fresnel",
+    "kerning",
     "lacunarity",
     "musgrave",
+    "ngon", "ngons",
     "normals",
-    "kerning",
-    "atomics",
+    "nurbs",
+    "quaternions",
+    "radiosity",
+    "reflectance",
+    "shader",
+    "shaders",
+    "specular",
 
     # blender terms
     "animsys",
diff --git a/check_source/check_style_c.py b/check_source/check_style_c.py
index 19e739e9f0c99180f343d60565d473713245ba54..4097c0eaa03c93f2454a15665021ee87c6f5ba49 100755
--- a/check_source/check_style_c.py
+++ b/check_source/check_style_c.py
@@ -1090,6 +1090,9 @@ def quick_check_indentation(lines):
     t_prev = -1
     ls_prev = ""
 
+    ws_prev = ""
+    ws_prev_expand = ""
+
     for i, l in enumerate(lines):
         skip = False
 
@@ -1147,6 +1150,17 @@ def quick_check_indentation(lines):
                                  (t - t_prev, tabs), i + 1)
             t_prev = t
 
+            # check for same indentation with different space/tab mix
+            ws = l[:len(l) - len(l.lstrip())]
+            ws_expand = ws.expandtabs(4)
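+            # two lines that expand to the same width but differ in their raw
+            # leading whitespace are mixing tabs and spaces inconsistently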
+            if ws_expand == ws_prev_expand:
+                if ws != ws_prev:
+                    warning_lineonly("E152", "indentation tab/space mismatch",
+                                     i + 1)
+            ws_prev = ws
+            ws_prev_expand = ws_expand
+
+
 import re
 re_ifndef = re.compile("^\s*#\s*ifndef\s+([A-z0-9_]+).*$")
 re_define = re.compile("^\s*#\s*define\s+([A-z0-9_]+).*$")
diff --git a/check_source/check_style_c_config.py b/check_source/check_style_c_config.py
index 6efb386384d7bdb23c7363460b87b22d2ba36aee..617436421c597eba591d929ec46e186b252266e0 100644
--- a/check_source/check_style_c_config.py
+++ b/check_source/check_style_c_config.py
@@ -42,7 +42,6 @@ IGNORE_DIR = (
     "source/blender/editors/physics",
     "source/blender/editors/space_logic",
     "source/blender/freestyle",
-    "source/blender/gpu",
     )
 
 
diff --git a/utils/blend2json.py b/utils/blend2json.py
new file mode 100755
index 0000000000000000000000000000000000000000..976c647ff772094eb035ac796957c5fc526e2dde
--- /dev/null
+++ b/utils/blend2json.py
@@ -0,0 +1,418 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+# (c) 2015, Blender Foundation - Bastien Montagne
+
+# <pep8 compliant>
+
+
+"""
+This is a tool for generating a JSON version of a .blend file (either only its structure, or with all its data included).
+
+It can also run some simple validity checks over a .blend file.
+
+WARNING! This is still a WIP tool!
+
+Example usage:
+
+   ./blend2json.py foo.blend
+
+To output complete DNA struct info:
+
+   ./blend2json.py --full-dna foo.blend
+
+To avoid getting all the old 'uid' addresses (those change very often even when the data itself does not,
+making diffs pretty noisy):
+
+   ./blend2json.py --no-old-addresses foo.blend
+
+To check a .blend file instead of outputting its JSON version (use an explicit -o option to do both at the same time):
+
+   ./blend2json.py -c foo.blend
+
+"""
+
+FILTER_DOC = """
+Each generic filter is made of three arguments: the include/exclude toggle ('+'/'-'), a regex to match against the
+name of the field to check (either one of the 'meta-data' fields generated by the JSON exporter, or an actual data
+field from the DNA structs), and a regex to match against the data of this field (the JSON-ified representation of
+the data, hence always a string).
+
+Filters are evaluated in the order they are given, that is, if a block does not pass the first filter,
+it is immediately rejected and no further check is done on it.
+
+You can make a filter recursive (that is, if an 'include' filter matches a 'pointer' property, the pointed data is
+automatically included as well, up to a given recursion depth) by appending either '*' (for infinite recursion)
+or a number (the maximum recursion depth) to the include/exclude toggle.
+Note that this only makes sense for 'include' filters, and is ignored for 'exclude' ones.
+
+Examples:
+
+To include only MESH blocks:
+
+   ./blend2json.py --filter-block "+" "code" "ME" foo.blend
+
+To include only MESH or CURVE blocks and all data used by them:
+
+   ./blend2json.py --filter-block "+" "code" "(ME)|(CU)" --filter-block "+*" ".*" ".*" foo.blend
+
+"""
+
+import os
+import struct
+import logging
+import gzip
+import tempfile
+import json
+import re
+
+# Avoid maintaining multiple blendfile modules
+import sys
+sys.path.append(os.path.join(
+        os.path.dirname(__file__),
+        "..", "..", "..",
+        "release", "scripts", "addons", "io_blend_utils", "blend",
+        ))
+del sys
+
+import blendfile
+
+
+##### Utils (own JSON formatting) #####
+
+
+def json_default(o):
+    if isinstance(o, bytes):
+        return repr(o)[2:-1]
+    elif o is ...:
+        return "<...>"
+    return o
+
+
+def json_dumps(i):
+    return json.dumps(i, default=json_default)
+
+
+def keyval_to_json(kvs, indent, indent_step, compact_output=False):
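+    # Values in 'kvs' are already JSON-encoded strings, so they are spliced
+    # into the output as-is (a single line when 'compact_output' is set).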
+    if compact_output:
+        return ('{' + ', '.join('"%s": %s' % (k, v) for k, v in kvs) + '}')
+    else:
+        return ('{%s' % indent_step[:-1] +
+                (',\n%s%s' % (indent, indent_step)).join(
+                    ('"%s":\n%s%s%s' % (k, indent, indent_step, v) if (v[0] in {'[', '{'}) else
+                     '"%s": %s' % (k, v)) for k, v in kvs) +
+                '\n%s}' % indent)
+
+
+def list_to_json(lst, indent, indent_step, compact_output=False):
+    if compact_output:
+        return ('[' + ', '.join(l for l in lst) + ']')
+    else:
+        return ('[%s' % indent_step[:-1] +
+                ((',\n%s%s' % (indent, indent_step)).join(
+                    ('\n%s%s%s' % (indent, indent_step, l) if (i == 0 and l[0] in {'[', '{'}) else l)
+                    for i, l in enumerate(lst))
+                ) +
+                '\n%s]' % indent)
+
+
+##### Main 'struct' writers #####
+
+def gen_fake_addresses(args, blend):
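+    # Map each block's 'addr_old' to a hash of the block's content, bumping the
+    # hash on collisions, so unchanged data keeps a stable 'address' across files.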
+    if args.use_fake_address:
+        hashes = set()
+        ret = {}
+        for block in blend.blocks:
+            if not block.addr_old:
+                continue
+            hsh = block.get_data_hash()
+            while hsh in hashes:
+                hsh += 1
+            hashes.add(hsh)
+            ret[block.addr_old] = hsh
+        return ret
+
+    return {}
+
+
+def bheader_to_json(args, fw, blend, indent, indent_step):
+    fw('%s"%s": [\n' % (indent, "HEADER"))
+    indent = indent + indent_step
+
+    keyval = (
+        ("magic", json_dumps(blend.header.magic)),
+        ("pointer_size", json_dumps(blend.header.pointer_size)),
+        ("is_little_endian", json_dumps(blend.header.is_little_endian)),
+        ("version", json_dumps(blend.header.version)),
+    )
+    keyval = keyval_to_json(keyval, indent, indent_step)
+    fw('%s%s' % (indent, keyval))
+
+    indent = indent[:-len(indent_step)]
+    fw('\n%s]' % indent)
+
+
+def do_bblock_filter(filters, blend, block, meta_keyval, data_keyval):
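+    # block.user_data holds the filtering state: None = never visited,
+    # 0 = visited but undecided, > 0 = included, < 0 = excluded
+    # (bblocks_to_json() only writes blocks that are None or > 0).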
+    def do_bblock_filter_data_recursive(blend, block, rec_lvl, rec_iter, key=None):
+        fields = (blend.structs[block.sdna_index].fields if key is None else
+                  [blend.structs[block.sdna_index].field_from_name.get(key[1:-1].encode())])
+        for fld in fields:
+            if fld is None:
+                continue
+            if fld.dna_name.is_pointer:
+                paths = ([(fld.dna_name.name_only, i) for i in range(fld.dna_name.array_size)]
+                         if fld.dna_name.array_size > 1 else [fld.dna_name.name_only])
+                for p in paths:
+                    child_block = block.get_pointer(p)
+                    if child_block is not None:
+                        child_block.user_data = max(block.user_data, rec_iter)
+                        if rec_lvl != 0:
+                            do_bblock_filter_data_recursive(blend, child_block, rec_lvl - 1, rec_iter + 1)
+
+    has_include = False
+    do_break = False
+    rec_iter = 1
+    if block.user_data is None:
+        block.user_data = 0
+    for include, rec_lvl, key, val in filters:
+        if rec_lvl < 0:
+            rec_lvl = 100
+        has_include = has_include or include
+        # Skip exclude filters if block was already processed some way.
+        if not include and block.user_data != 0:
+            continue
+        has_match = False
+        for k, v in meta_keyval:
+            if key.search(k) and val.search(v):
+                has_match = True
+                if include:
+                    block.user_data = max(block.user_data, rec_iter)
+                    # Note that in include cases, we have to keep checking filters, since some 'include recursive'
+                    # ones may still have to be processed...
+                else:
+                    block.user_data = min(block.user_data, -rec_iter)
+                    do_break = True  # No need to check more filters in exclude case...
+                    break
+        for k, v in data_keyval:
+            if key.search(k) and val.search(v):
+                has_match = True
+                if include:
+                    block.user_data = max(block.user_data, rec_iter)
+                    if rec_lvl != 0:
+                        do_bblock_filter_data_recursive(blend, block, rec_lvl - 1, rec_iter + 1, k)
+                    # Note that in include cases, we have to keep checking filters, since some 'include recursive'
+                    # ones may still have to be processed...
+                else:
+                    block.user_data = min(block.user_data, -rec_iter)
+                    do_break = True  # No need to check more filters in exclude case...
+                    break
+        if include and not has_match:  # Include check failed, implies exclusion.
+            block.user_data = min(block.user_data, -rec_iter)
+            do_break = True  # No need to check more filters in exclude case...
+        if do_break:
+            break
+    # Implicit 'include all' in case no include filter is specified...
+    if block.user_data == 0 and not has_include:
+        block.user_data = max(block.user_data, rec_iter)
+
+
+def bblocks_to_json(args, fw, blend, address_map, indent, indent_step):
+    no_address = args.no_address
+    full_data = args.full_data
+
+    def gen_meta_keyval(blend, block):
+        keyval = [
+            ("code", json_dumps(block.code)),
+            ("size", json_dumps(block.size)),
+        ]
+        if not no_address:
+            keyval += [("addr_old", json_dumps(address_map.get(block.addr_old, block.addr_old)))]
+        keyval += [
+            ("dna_type_id", json_dumps(blend.structs[block.sdna_index].dna_type_id)),
+            ("count", json_dumps(block.count)),
+        ]
+        return keyval
+
+    def gen_data_keyval(blend, block):
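+        # JSON-encode every (path, value) item of the block, remapping pointer
+        # values through the fake address map when one is in use.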
+        def _is_pointer(k):
+            return blend.structs[block.sdna_index].field_from_path(blend.header, blend.handle, k).dna_name.is_pointer
+        return [(json_dumps(k)[1:-1], json_dumps(address_map.get(v, v) if _is_pointer(k) else v))
+                for k, v in block.items_recursive_iter()]
+
+    if args.block_filters:
+        for block in blend.blocks:
+            meta_keyval = gen_meta_keyval(blend, block)
+            data_keyval = gen_data_keyval(blend, block)
+            do_bblock_filter(args.block_filters, blend, block, meta_keyval, data_keyval)
+
+    fw('%s"%s": [\n' % (indent, "DATA"))
+    indent = indent + indent_step
+
+    is_first = True
+    for i, block in enumerate(blend.blocks):
+        if block.user_data is None or block.user_data > 0:
+            meta_keyval = gen_meta_keyval(blend, block)
+            if full_data:
+                meta_keyval.append(("data", keyval_to_json(gen_data_keyval(blend, block),
+                                                           indent + indent_step, indent_step, args.compact_output)))
+            keyval = keyval_to_json(meta_keyval, indent, indent_step, args.compact_output)
+            fw('%s%s%s' % ('' if is_first else ',\n', indent, keyval))
+            is_first = False
+
+    indent = indent[:-len(indent_step)]
+    fw('\n%s]' % indent)
+
+
+def bdna_to_json(args, fw, blend, indent, indent_step):
+    full_dna = args.full_dna and not args.compact_output
+
+    def bdna_fields_to_json(blend, dna, indent, indent_step):
+        lst = []
+        for i, field in enumerate(dna.fields):
+            keyval = (
+                ("dna_name", json_dumps(field.dna_name.name_only)),
+                ("dna_type_id", json_dumps(field.dna_type.dna_type_id)),
+                ("is_pointer", json_dumps(field.dna_name.is_pointer)),
+                ("is_method_pointer", json_dumps(field.dna_name.is_method_pointer)),
+                ("array_size", json_dumps(field.dna_name.array_size)),
+            )
+            lst.append(keyval_to_json(keyval, indent + indent_step, indent_step))
+        return list_to_json(lst, indent, indent_step)
+
+    fw('%s"%s": [\n' % (indent, "DNA_STRUCT"))
+    indent = indent + indent_step
+
+    is_first = True
+    for dna in blend.structs:
+        keyval = [
+            ("dna_type_id", json_dumps(dna.dna_type_id)),
+            ("size", json_dumps(dna.size)),
+        ]
+        if full_dna:
+            keyval += [("fields", bdna_fields_to_json(blend, dna, indent + indent_step, indent_step))]
+        else:
+            keyval += [("nbr_fields", json_dumps(len(dna.fields)))]
+        keyval = keyval_to_json(keyval, indent, indent_step, args.compact_output)
+        fw('%s%s%s' % ('' if is_first else ',\n', indent, keyval))
+        is_first = False
+
+    indent = indent[:-len(indent_step)]
+    fw('\n%s]' % indent)
+
+
+def blend_to_json(args, f, blend, address_map):
+    fw = f.write
+    fw('{\n')
+    indent = indent_step = "  "
+    bheader_to_json(args, fw, blend, indent, indent_step)
+    fw(',\n')
+    bblocks_to_json(args, fw, blend, address_map, indent, indent_step)
+    fw(',\n')
+    bdna_to_json(args, fw, blend, indent, indent_step)
+    fw('\n}\n')
+
+
+##### Checks #####
+
+def check_file(args, blend):
+    addr_old = set()
+    for block in blend.blocks:
+        if block.addr_old in addr_old:
+            print("ERROR! Several data blocks share same 'addr_old' uuid %d, "
+                  "this should never happen!" % block.addr_old)
+            continue
+        addr_old.add(block.addr_old)
+
+
+##### Main #####
+
+def argparse_create():
+    import argparse
+    global __doc__
+
+    # When --help or no args are given, print this help
+    usage_text = __doc__
+
+    epilog = "This script is typically used to check differences between .blend files, or to check their validity."
+
+    parser = argparse.ArgumentParser(description=usage_text, epilog=epilog,
+                                     formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    parser.add_argument(dest="input", nargs="+", metavar='PATH',
+            help="Input .blend file(s)")
+    parser.add_argument("-o", "--output", dest="output", action="append", metavar='PATH', required=False,
+            help="Output .json file(s) (same path/name as input file(s) if not specified)")
+    parser.add_argument("-c", "--check-file", dest="check_file", default=False, action='store_true', required=False,
+            help=("Perform some basic validation checks over the .blend file"))
+    parser.add_argument("--compact-output", dest="compact_output", default=False, action='store_true', required=False,
+            help=("Output a very compact representation of blendfile (one line per block/DNAStruct)"))
+    parser.add_argument("--no-old-addresses", dest="no_address", default=False, action='store_true', required=False,
+            help=("Do not output old memory address of each block of data "
+                  "(used as 'uuid' in .blend files, but change pretty noisily)"))
+    parser.add_argument("--no-fake-old-addresses", dest="use_fake_address", default=True, action='store_false',
+            required=False,
+            help=("Do not 'rewrite' old memory address of each block of data "
+                  "(they are rewritten by default to some hash of their content, "
+                  "to try to avoid too much diff noise between different but similar files)"))
+    parser.add_argument("--full-data", dest="full_data",
+            default=False, action='store_true', required=False,
+            help=("Also put in JSon file data itself "
+                  "(WARNING! will generate *huge* verbose files - and is far from complete yet)"))
+    parser.add_argument("--full-dna", dest="full_dna", default=False, action='store_true', required=False,
+            help=("Also put in JSon file dna properties description (ignored when --compact-output is used)"))
+
+    group = parser.add_argument_group("Filters", FILTER_DOC)
+    group.add_argument("--filter-block", dest="block_filters", nargs=3, action='append',
+            help=("Filter to apply to BLOCKS (a.k.a. data itself)"))
+
+    return parser
+
+
+def main():
+    # ----------
+    # Parse Args
+
+    args = argparse_create().parse_args()
+
+    if not args.output:
+        if args.check_file:
+            args.output = [None] * len(args.input)
+        else:
+            args.output = [os.path.splitext(infile)[0] + ".json" for infile in args.input]
+
+    if args.block_filters:
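+        # Convert each (toggle, key, data) triple into (include, rec_lvl, key_re, data_re);
+        # rec_lvl is 0 for plain filters, -1 (unlimited) for '*', else the given depth.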
+        args.block_filters = [(True if m[0] == "+" else False,
+                               0 if len(m) == 1 else (-1 if m[1] == "*" else int(m[1:])),
+                               re.compile(f), re.compile(d))
+                              for m, f, d in args.block_filters]
+
+    for infile, outfile in zip(args.input, args.output):
+        with blendfile.open_blend(infile) as blend:
+            address_map = gen_fake_addresses(args, blend)
+
+            if args.check_file:
+                check_file(args, blend)
+
+            if outfile:
+                with open(outfile, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
+                    blend_to_json(args, f, blend, address_map)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/credits_git_gen.py b/utils/credits_git_gen.py
index de5bb8ad47f464ca3591fbd2251e6e5015eabe25..f1049a89bec8fe3e9a7110273419b73985df8b53 100755
--- a/utils/credits_git_gen.py
+++ b/utils/credits_git_gen.py
@@ -166,4 +166,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-
diff --git a/utils/git_log_review_commits_advanced.py b/utils/git_log_review_commits_advanced.py
index e29a9f891324038fb50a2176feeb7f0eff6f8ae1..7294b5e74d977ad7f103fbf9cf78e8e8d58c1cc4 100755
--- a/utils/git_log_review_commits_advanced.py
+++ b/utils/git_log_review_commits_advanced.py
@@ -36,6 +36,11 @@ proof-reading after this tool has ran is heavily suggested!
 Example usage:
 
    ./git_log_review_commits_advanced.py  --source ../../src --range HEAD~40..HEAD --filter 'BUGFIX' --accept-pretty --accept-releaselog --blender-rev 2.75
+
+To add the list of fixes between RC2 and RC3, and also list both RC2 and RC3 fixes in their own sections:
+
+   ./git_log_review_commits_advanced.py  --source ../../src --range <RC2 revision>..<RC3 revision> --filter 'BUGFIX' --accept-pretty --accept-releaselog --blender-rev 2.76 --blender-rstate=RC3 --blender-rstate-list="RC2,RC3"
+
 """
 
 import os
@@ -48,6 +53,9 @@ REJECT_FILE = "review_reject.txt"
 ACCEPT_PRETTY_FILE = "review_accept_pretty.txt"
 ACCEPT_RELEASELOG_FILE = "review_accept_release_log.txt"
 
+IGNORE_START_LINE = "<!-- IGNORE_START -->"
+IGNORE_END_LINE = "<!-- IGNORE_END -->"
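+# (anything between these two markers is auto-generated and is skipped
+# when an existing release log file is re-parsed)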
+
 _cwd = os.getcwd()
 __doc__ = __doc__ + \
           "\nRaw GIT revisions files:\n\t* Accepted: %s\n\t* Rejected: %s\n\n" \
@@ -56,6 +64,7 @@ __doc__ = __doc__ + \
              os.path.join(_cwd, ACCEPT_PRETTY_FILE), os.path.join(_cwd, ACCEPT_RELEASELOG_FILE))
 del _cwd
 
+
 class _Getch:
     """
     Gets a single character from standard input.
@@ -140,6 +149,7 @@ BUGFIX_CATEGORIES = (
         "Constraints",
         "Grease Pencil",
         "Objects",
+        "Dependency Graph",
         ),
     ),
 
@@ -149,6 +159,7 @@ BUGFIX_CATEGORIES = (
         "Mesh Editing",
         "Meta Editing",
         "Modifiers",
+        "Material / Texture",
         ),
     ),
 
@@ -223,19 +234,21 @@ re_commitify = re.compile(r"\W(r(?:B|BA|BAC|BTS)[0-9a-fA-F]{6,})")
 re_prettify = re.compile(r"(.{,20}?)(Fix(?:ing|es)?\s*(?:for)?\s*" + re_bugify_str + r")\s*[-:,]*\s*", re.IGNORECASE)
 
 
-def gen_commit_pretty(c, unreported=None):
+def gen_commit_pretty(c, unreported=None, rstate=None):
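+    # Format a single commit as a release-log bullet entry, optionally
+    # prefixed with the release state tag it belongs to (e.g. '[RC2]').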
     # In git, all commit message lines until first empty one are part of 'summary'.
     body = c.body.split("\n\n")[0].strip(" :.;-\n").replace("\n", " ")
 
-    tbody = re_prettify.sub(r"* Fix {{BugReport|\3}}: \1", body)
+    tbody = re_prettify.sub(r"Fix {{BugReport|\3}}: \1", body)
     if tbody == body:
         if unreported is not None:
             unreported[0] = True
-        tbody = "* Fix unreported: %s" % body
+        tbody = "Fix unreported: %s" % body
     body = re_bugify.sub(r"{{BugReport|\1}}", tbody)
     body = re_commitify.sub(r"{{GitCommit|\1}}", body)
 
-    return "%s ({{GitCommit|rB%s}})." % (body, c.sha1.decode()[:10])
+    if rstate is not None:
+        return "* [%s] %s ({{GitCommit|rB%s}})." % (rstate, body, c.sha1.decode()[:10])
+    return "* %s ({{GitCommit|rB%s}})." % (body, c.sha1.decode()[:10])
 
 
 def print_categories_tree():
@@ -245,12 +258,21 @@ def print_categories_tree():
             print("\t\t[%d] %s" % (j, sub_cat))
 
 
-def release_log_init(path, source_dir, blender_rev, start_sha1, end_sha1):
+def release_log_init(path, source_dir, blender_rev, start_sha1, end_sha1, rstate, rstate_list):
     from git_log import GitRepo
 
-    if os.path.exists(path):
-        release_log = {}
+    if rstate is not None:
+        header = "= Blender %s: Bug Fixes =\n\n" \
+                 "[%s] Changes from revision {{GitCommit|%s}} to {{GitCommit|%s}}, inclusive.\n\n" \
+                 % (blender_rev, rstate, start_sha1[:10], end_sha1[:10])
+    else:
+        header = "= Blender %s: Bug Fixes =\n\n" \
+                 "Changes from revision {{GitCommit|%s}} to {{GitCommit|%s}}, inclusive.\n\n" \
+                 % (blender_rev, start_sha1[:10], end_sha1[:10])
+
+    release_log = {"__HEADER__": header, "__COUNT__": [0, 0], "__RSTATES__": {k: [] for k in rstate_list}}
 
+    if os.path.exists(path):
         branch = GitRepo(source_dir).branch.decode().strip()
 
         sub_cats_to_main_cats = {s_cat: m_cat[0] for m_cat in BUGFIX_CATEGORIES for s_cat in m_cat[1]}
@@ -259,11 +281,24 @@ def release_log_init(path, source_dir, blender_rev, start_sha1, end_sha1):
             header = []
             main_cat = None
             sub_cat = None
+            ignore = False
             for l in f:
+                if IGNORE_END_LINE in l:
+                    ignore = False
+                    continue
+                elif ignore or IGNORE_START_LINE in l:
+                    ignore = True
+                    continue
                 l = l.strip(" \n")
                 if not header:
                     header.append(l)
                     for hl in f:
+                        if IGNORE_END_LINE in hl:
+                            ignore = False
+                            continue
+                        elif ignore or IGNORE_START_LINE in hl:
+                            ignore = True
+                            continue
                         hl = hl.strip(" \n")
                         if hl.startswith("=="):
                             main_cat = hl.strip(" =")
@@ -276,9 +311,15 @@ def release_log_init(path, source_dir, blender_rev, start_sha1, end_sha1):
                             break
                         header.append(hl)
 
-                    release_log["__HEADER__"] = "%s\nChanges from revision {{GitCommit|%s}} to {{GitCommit|%s}}, " \
-                                                "inclusive (''%s'' branch).\n\n" \
-                                                "" % ("\n".join(header), start_sha1[:10], end_sha1[:10], branch)
+                    if rstate is not None:
+                        release_log["__HEADER__"] = "%s[%s] Changes from revision {{GitCommit|%s}} to " \
+                                                    "{{GitCommit|%s}}, inclusive (''%s'' branch).\n\n" \
+                                                    "" % ("\n".join(header), rstate,
+                                                          start_sha1[:10], end_sha1[:10], branch)
+                    else:
+                        release_log["__HEADER__"] = "%sChanges from revision {{GitCommit|%s}} to {{GitCommit|%s}}, " \
+                                                    "inclusive (''%s'' branch).\n\n" \
+                                                    "" % ("\n".join(header), start_sha1[:10], end_sha1[:10], branch)
                     count = release_log["__COUNT__"] = [0, 0]
                     continue
 
@@ -311,23 +352,26 @@ def release_log_init(path, source_dir, blender_rev, start_sha1, end_sha1):
                         main_cat_data_unreported.setdefault(sub_cat, []).append(l)
                         count[1] += 1
                         #~ print("l UNREPORTED:", l)
-        return release_log
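+                    # Also re-collect entries tagged '[<rstate>]' into their
+                    # own per-release-state lists.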
+                    l_rstate = l.strip("* ")
+                    if l_rstate.startswith("["):
+                        end = l_rstate.find("]")
+                        if end > 0:
+                            rstate = l_rstate[1:end]
+                            if rstate in release_log["__RSTATES__"]:
+                                release_log["__RSTATES__"][rstate].append("* %s" % l_rstate[end + 1:].strip())
 
-    else:
-        header = "= Blender %s: Bug Fixes =\n\n" \
-                 "Changes from revision {{GitCommit|%s}} to {{GitCommit|%s}}, inclusive.\n\n" \
-                 % (blender_rev, start_sha1[:10], end_sha1[:10])
+    return release_log
 
-        return {"__HEADER__": header, "__COUNT__": [0, 0]}
 
+def write_release_log(path, release_log, c, cat, rstate, rstate_list):
-def write_release_log(path, release_log, c, cat):
     main_cat, sub_cats = BUGFIX_CATEGORIES[cat[0]]
     sub_cat = sub_cats[cat[1]] if cat[1] is not None else None
 
     main_cat_data, main_cat_data_unreported = release_log.setdefault(main_cat, ({}, {}))
     unreported = [False]
-    entry = gen_commit_pretty(c, unreported)
+    entry = gen_commit_pretty(c, unreported, rstate)
     if unreported[0]:
         main_cat_data_unreported.setdefault(sub_cat, []).append(entry)
         release_log["__COUNT__"][1] += 1
@@ -335,47 +379,70 @@ def write_release_log(path, release_log, c, cat):
         main_cat_data.setdefault(sub_cat, []).append(entry)
         release_log["__COUNT__"][0] += 1
 
+    if rstate in release_log["__RSTATES__"]:
+        release_log["__RSTATES__"][rstate].append(gen_commit_pretty(c))
+
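+    # Rebuild the whole log body in memory: one section per main category with
+    # sub-sections per sub-category, dropping any section that ends up empty.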
+    lines = []
+    main_cat_lines = []
+    sub_cat_lines = []
+    for main_cat, sub_cats in BUGFIX_CATEGORIES:
+        main_cat_data = release_log.get(main_cat, ({}, {}))
+        main_cat_lines[:] = ["== %s ==" % main_cat]
+        for data in main_cat_data:
+            entries = data.get(None, [])
+            if entries:
+                main_cat_lines.extend(entries)
+                main_cat_lines.append("")
+        if len(main_cat_lines) == 1:
+            main_cat_lines.append("")
+        for sub_cat in sub_cats:
+            sub_cat_lines[:] = ["=== %s ===" % sub_cat]
+            for data in main_cat_data:
+                entries = data.get(sub_cat, [])
+                if entries:
+                    sub_cat_lines.extend(entries)
+                    sub_cat_lines.append("")
+            if len(sub_cat_lines) > 2:
+                main_cat_lines += sub_cat_lines
+        if len(main_cat_lines) > 2:
+            lines += main_cat_lines
+
+    if None in release_log:
+        main_cat_data = release_log.get(None, ({}, {}))
+        main_cat_lines[:] = ["== %s ==\n\n" % "UNSORTED"]
+        for data in main_cat_data:
+            entries = data.get(None, [])
+            if entries:
+                main_cat_lines.extend(entries)
+                main_cat_lines.append("")
+        if len(main_cat_lines) > 2:
+            lines += main_cat_lines
+
     with open(path, 'w') as f:
         f.write(release_log["__HEADER__"])
+
         count = release_log["__COUNT__"]
+        f.write("%s\n" % IGNORE_START_LINE)
         f.write("Total fixed bugs: %d (%d from tracker, %d reported/found by other ways).\n\n"
                 "" % (sum(count), count[0], count[1]))
-        for main_cat, sub_cats in BUGFIX_CATEGORIES:
-            main_cat_data = release_log.get(main_cat, ({}, {}))
-            f.write("== %s ==\n" % main_cat)
-            data_written = False
-            for data in main_cat_data:
-                entries = data.get(None, [])
-                if entries:
-                    f.write("\n".join(entries))
-                    f.write("\n\n")
-                    data_written = True
-            if not data_written:
-                f.write("\n")
-            for sub_cat in sub_cats:
-                f.write("=== %s ===\n" % sub_cat)
-                data_written = False
-                for data in main_cat_data:
-                    entries = data.get(sub_cat, [])
-                    if entries:
-                        f.write("\n".join(entries))
-                        f.write("\n\n")
-                        data_written = True
-                if not data_written:
-                    f.write("None.\n\n")
-
-        if None in release_log:
-            main_cat_data = release_log.get(main_cat, ({}, {}))
-            f.write("== %s ==\n\n" % "UNSORTED")
-            data_written = False
-            for data in main_cat_data:
-                entries = data.get(None, [])
-                if entries:
-                    f.write("\n".join(entries))
-                    f.write("\n\n")
-                    data_written = True
-            if not data_written:
-                f.write("None.\n\n")
+        f.write("%s\n%s\n\n" % ("{{Note|Note|Before RC1 (i.e. during regular development of next version in master "
+                                "branch), only fixes of issues which already existed in previous official releases are "
+                                "listed here. Fixes for regressions introduced since last release, or for new "
+                                "features, are '''not''' listed here.<br/>For following RCs and final release, "
+                                "'''all''' backported fixes are listed.}}", IGNORE_END_LINE))
+
+        f.write("\n".join(lines))
+        f.write("\n")
+
+        f.write("%s\n\n<hr/>\n\n" % IGNORE_START_LINE)
+        for rst in rstate_list:
+            entries = release_log["__RSTATES__"].get(rst, [])
+            if entries:
+                f.write("== %s ==\n" % rst)
+                f.write("For %s, %d bugs were fixed:\n\n" % (rst, len(entries)))
+                f.write("\n".join(entries))
+                f.write("\n\n")
+        f.write("%s\n" % IGNORE_END_LINE)
 
 
 def argparse_create():
@@ -411,6 +478,14 @@ def argparse_create():
     parser.add_argument("--blender-rev", dest="blender_rev",
             default=None, required=False,
             help=("Blender revision (only used to generate release notes page)"))
+    parser.add_argument("--blender-rstate", dest="blender_rstate",
+            default="alpha", required=False,
+            help=("Blender release state (like alpha, beta, rc1, final, corr_a, corr_b, etc.), "
+                  "each revision will be tagged by given one"))
+    parser.add_argument("--blender-rstate-list", dest="blender_rstate_list",
+            default="", required=False, type=lambda s: s.split(","),
+            help=("Blender release state(s) to additionaly list in their own sections "
+                  "(e.g. pass 'RC2' to list fixes between RC1 and RC2, ie tagged as RC2, etc.)"))
 
     return parser
 
@@ -459,7 +534,8 @@ def main():
         blender_rev = args.blender_rev or "<UNKNOWN>"
         commits = tuple(GitCommitIter(args.source_dir, args.range_sha1))
         release_log = release_log_init(ACCEPT_RELEASELOG_FILE, args.source_dir, blender_rev,
-                                       commits[-1].sha1.decode(), commits[0].sha1.decode())
+                                       commits[-1].sha1.decode(), commits[0].sha1.decode(),
+                                       args.blender_rstate, args.blender_rstate_list)
         commits = [c for c in commits if match(c)]
     else:
         commits = [c for c in GitCommitIter(args.source_dir, args.range_sha1) if match(c)]
@@ -561,7 +637,7 @@ def main():
                             c1 = get_cat(ch, len(BUGFIX_CATEGORIES))
                         elif c2 is None:
                             if ch == b'\r':
-                                break;
+                                break
                             elif ch == b'\x7f':  # backspace
                                 c1 = None
                                 continue
@@ -571,12 +647,13 @@ def main():
                         else:
                             print("BUG! this should not happen!")
 
-                    if done_main == False:
+                    if done_main is False:
                         # Go back to main loop, this commit is no more accepted nor rejected.
                         tot_accept -= 1
                         continue
 
-                    write_release_log(ACCEPT_RELEASELOG_FILE, release_log, c, (c1, c2))
+                    write_release_log(ACCEPT_RELEASELOG_FILE, release_log, c, (c1, c2),
+                                      args.blender_rstate, args.blender_rstate_list)
                 break
             elif ch == b'\r':
                 log_filepath = REJECT_FILE
@@ -591,7 +668,7 @@ def main():
 
         if args.accept_pretty and log_filepath_pretty:
             with open(log_filepath_pretty, 'a') as f:
-                f.write(gen_commit_pretty(c) + "\n")
+                f.write(gen_commit_pretty(c, rstate=args.blender_rstate) + "\n")
 
     exit_message()
 
diff --git a/utils_build/cmake-flags b/utils_build/cmake-flags
new file mode 100755
index 0000000000000000000000000000000000000000..7ad6e5cbcb344e3f7e90e1b64879fae5e1cc1696
--- /dev/null
+++ b/utils_build/cmake-flags
@@ -0,0 +1,428 @@
+#!/usr/bin/env python3
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+"""
+This tool is for configuring build flags.
+"""
+
+WITH_GUI = True
+
+# ----------------------------------------------------------------------------
+# Data (flags)
+# eg: individual flags, compat info.
+
+
+# ----------------------------------------------------------------------------
+# Data (presets)
+# eg: profiling, mudflap, debugging.
+
+# setting: ((add, ...), (remove, ...)), ...
+PRESETS = {
+    "sanitize_address": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=address",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=address",), ()),
+        "CMAKE_EXE_LINKER_FLAGS": (("-lasan",), ()),
+        },
+    "sanitize_leak": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=leak",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=leak",), ()),
+        },
+    "sanitize_undefined": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=undefined",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=undefined",), ()),
+        },
+    "sanitize_thread": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=thread",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=thread",), ()),
+        },
+
+    # GCC5
+    "sanitize_float_divide_by_zero": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=float-divide-by-zero",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=float-divide-by-zero",), ()),
+        },
+    "sanitize_float_cast_overflow": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=float-cast-overflow",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=float-cast-overflow",), ()),
+        },
+    "sanitize_int_overflow": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=signed-integer-overflow",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=signed-integer-overflow",), ()),
+        },
+    "sanitize_bool": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=bool",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=bool",), ()),
+        },
+    "sanitize_enun": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=enum",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=enum",), ()),
+        },
+    "sanitize_bounds": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=bounds",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=bounds",), ()),
+        },
+    "sanitize_bounds_strict": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=bounds-strict",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=bounds-strict",), ()),
+        },
+    "sanitize_vla_bounds": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=vla-bounds",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=vla-bounds",), ()),
+        },
+    "sanitize_alignment": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=alignment",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=alignment",), ()),
+        },
+    "sanitize_object_size": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=object-size",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=object-size",), ()),
+        },
+    "sanitize_nonull_attribute": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=nonnull-attribute",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=nonnull-attribute",), ()),
+        },
+    "sanitize_returns_nonull_attribute": {
+        "CMAKE_CXX_FLAGS": (("-fsanitize=returns-nonnull-attribute",), ()),
+        "CMAKE_C_FLAGS":   (("-fsanitize=returns-nonnull-attribute",), ()),
+        },
+
+    "warn_all": {
+        "CMAKE_CXX_FLAGS": (("-Wall",), ()),
+        "CMAKE_C_FLAGS":   (("-Wall",), ()),
+        },
+    "warn_extra": {
+        "CMAKE_CXX_FLAGS": (("-Wextra",), ()),
+        "CMAKE_C_FLAGS":   (("-Wextra",), ()),
+        },
+    "warn_unused_macros": {
+        "CMAKE_CXX_FLAGS": (("-Wunused-macros",), ()),
+        "CMAKE_C_FLAGS":   (("-Wunused-macros",), ()),
+        },
+    "warn_undefined_macros": {
+        "CMAKE_CXX_FLAGS": (("-Wundef",), ()),
+        "CMAKE_C_FLAGS":   (("-Wundef",), ()),
+        },
+    "warn_unused_local_typedefs": {
+        "CMAKE_CXX_FLAGS": (("-Wunused-local-typedefs",), ()),
+        "CMAKE_C_FLAGS":   (("-Wunused-local-typedefs",), ()),
+        },
+    "warn_pointer_sign": {
+        "CMAKE_CXX_FLAGS": (("",), ()),
+        "CMAKE_C_FLAGS":   (("-Wpointer-sign",), ()),
+        },
+    "warn_sizeof_pointer_memaccess": {
+        "CMAKE_CXX_FLAGS": (("-Wsizeof-pointer-memaccess",), ()),
+        "CMAKE_C_FLAGS":   (("-Wsizeof-pointer-memaccess",), ()),
+        },
+    "warn_no_null": {
+        "CMAKE_CXX_FLAGS": (("-Wnonnull",), ()),
+        "CMAKE_C_FLAGS":   (("-Wnonnull",), ()),
+        },
+    "warn_init_self": {
+        "CMAKE_CXX_FLAGS": (("-Winit-self",), ()),
+        "CMAKE_C_FLAGS":   (("-Winit-self",), ()),
+        },
+    "warn_format": {
+        "CMAKE_CXX_FLAGS": (("-Wformat=2", "-Wno-format-nonliteral", "-Wno-format-y2k"), ()),
+        "CMAKE_C_FLAGS":   (("-Wformat=2", "-Wno-format-nonliteral", "-Wno-format-y2k"), ()),
+        },
+    "warn_format": {
+        "CMAKE_CXX_FLAGS": (("-Wwrite-strings",), ()),
+        "CMAKE_C_FLAGS":   (("-Wwrite-strings",), ()),
+        },
+    "warn_logical_op": {
+        "CMAKE_CXX_FLAGS": (("-Wlogical-op",), ()),
+        "CMAKE_C_FLAGS":   (("-Wlogical-op",), ()),
+        },
+    "warn_error": {
+        "CMAKE_CXX_FLAGS": (("-Werror",), ()),
+        "CMAKE_C_FLAGS":   (("-Werror",), ()),
+        },
+    "warn_shadow": {
+        "CMAKE_CXX_FLAGS": (("-Wshadow", "-Wno-error=shadow"), ()),
+        "CMAKE_C_FLAGS":   (("-Wshadow", "-Wno-error=shadow"), ()),
+        },
+    "warn_missing_include_dirs": {
+        "CMAKE_CXX_FLAGS": (("-Wmissing-include-dirs",), ()),
+        "CMAKE_C_FLAGS":   (("-Wmissing-include-dirs",), ()),
+        },
+    "warn_double_promotion": {
+        "CMAKE_CXX_FLAGS": (("-Wdouble-promotion",), ()),
+        "CMAKE_C_FLAGS":   (("-Wdouble-promotion",), ()),
+        },
+    "warn_declaration_after_statement": {
+        "CMAKE_C_FLAGS":   (("-Wdeclaration-after-statement",), ()),
+        },
+    "warn_zero_as_null_pointer_constant": {
+        "CMAKE_CXX_FLAGS": (("-Wzero-as-null-pointer-constant",), ()),
+        },
+    "show_color": {
+        "CMAKE_C_FLAGS": (("-fdiagnostics-color=always",), ()),
+        "CMAKE_CXX_FLAGS": (("-fdiagnostics-color=always",), ()),
+        },
+
+    # Optimize
+    "optimize_whole_program": {
+        "CMAKE_CXX_FLAGS": (("-flto",), ()),
+        "CMAKE_C_FLAGS":   (("-flto",), ()),
+        "CMAKE_EXE_LINKER_FLAGS": (("-flto", "-fwhole-program",), ()),
+        },
+
+    # Profile
+    "profile_gprof": {
+        "CMAKE_CXX_FLAGS": (("-pg",), ()),
+        "CMAKE_C_FLAGS":   (("-pg",), ()),
+        "CMAKE_EXE_LINKER_FLAGS": (("-pg",), ()),
+        },
+}
+
+# ----------------------------------------------------------------------------
+# Utility Functions
+# eg: check buildsystem (make or ninja?)
+
+
+def cmake_flag_buildtype_suffix(flag, build_type):
+    """
+    Add the build type as a suffix for options that support it.
+    This way, for Debug builds, we edit the debug flags.
+    eg:
+      CMAKE_CXX_FLAGS -> CMAKE_CXX_FLAGS_DEBUG
+    """
+    if build_type == "":
+        return flag
+
+    # perhaps there are more,
+    # but these are the defaults that can have _DEBUG etc. appended.
+    if flag in {'CMAKE_CXX_FLAGS',
+                'CMAKE_C_FLAGS',
+                'CMAKE_EXE_LINKER_FLAGS',
+                'CMAKE_MODULE_LINKER_FLAGS',
+                'CMAKE_SHARED_LINKER_FLAGS'}:
+
+        return "%s_%s" % (flag, build_type.upper())
+    else:
+        return flag
+
+
+# ----------------------------------------------------------------------------
+# CMakeCache.txt Parser (simple)
+#
+# These functions should run standalone
+# the format in Python is as follows...
+#
+# CMakeCache.txt is converted into a dict
+# the key is the cache ID
+# the value is a 4-tuple (type, value, description, internal)
+# where the description is the comment above the entry, conforming to the CMake convention.
+#
+#
+def cmakecache_to_py(filepath, native=True):
+    """
+    header, cache
+    """
+
+    cmake_header = ""
+    cmake_cache = {}
+
+    with open(filepath, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+        for i in range(len(lines)):
+            if lines[i].startswith("#"):
+                cmake_header += lines[i]
+            else:
+                break
+
+        # in case it's not set
+        cmake_descr = ""
+        cmake_internal = False
+
+        for i in range(len(lines)):
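+            # '//' lines accumulate the description for the next cache entry;
+            # a '#' section header switches parsing to the INTERNAL entries.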
+            if lines[i].startswith("//"):
+                cmake_descr += lines[i][2:].rstrip() + "\n"
+            elif lines[i].startswith("#"):
+                if "INTERNAL cache entries" in lines[i]:
+                    cmake_internal = True
+            elif lines[i][0].isalpha() or lines[i][0] in {"_", "-"}:
+                cmake_name, cmake_value = lines[i].split("=", 1)
+                if ":" in cmake_name:
+                    cmake_name, cmake_type = cmake_name.split(":", 1)
+                else:
+                    cmake_type = "STRING"
+                cmake_value = cmake_value.rstrip()  # remove trailing '\n'
+                cmake_descr = cmake_descr.rstrip()
+
+                if native:
+                    if cmake_type in {"STRING", "PATH", "FILEPATH", "STATIC", "INTERNAL", "UNINITIALIZED"}:
+                        pass
+                    elif cmake_type == "BOOL":
+                        cmake_value = cmake_value.upper() not in {"NO", "N", "", "OFF", "0", "FALSE"}
+
+                cmake_cache[cmake_name] = (cmake_type, cmake_value, cmake_descr, cmake_internal)
+
+                # in case it's not set
+                cmake_descr = ""
+
+    return cmake_header, cmake_cache
+
+
+def cmakecache_from_py(filepath, cmake_header, cmake_cache):
+
+    with open(filepath, 'w', encoding='utf-8') as f:
+        f.write(cmake_header)
+        f.write("\n")
+
+        cmake_cache_list = ([], [])
+
+        # sort into external/internal
+        for cmake_name, (cmake_type, cmake_value, cmake_descr, cmake_internal) in cmake_cache.items():
+            cmake_cache_list[cmake_internal].append((cmake_name, cmake_type, cmake_value, cmake_descr))
+
+        for is_internal, ls in enumerate(cmake_cache_list):
+            ls.sort()
+            if is_internal:
+                f.write("########################\n"
+                        "# INTERNAL cache entries\n"
+                        "########################\n\n")
+            else:
+                f.write("########################\n"
+                        "# EXTERNAL cache entries\n"
+                        "########################\n\n")
+
+            for cmake_name, cmake_type, cmake_value, cmake_descr in ls:
+                if cmake_descr:
+                    l = None
+                    for l in cmake_descr.split("\n"):
+                        f.write("//%s\n" % l)
+                    del l
+
+                # convert the value
+                if cmake_value is True:
+                    cmake_value = "TRUE"
+                elif cmake_value is False:
+                    cmake_value = "FALSE"
+
+                f.write("%s:%s=%s\n" % (cmake_name, cmake_type, cmake_value))
+                if not is_internal:
+                    f.write("\n")
+
+# cmake_header, cmake_cache = cmakecache_to_py("/src/cmake_debug/CMakeCache.txt~")
+# cmakecache_from_py("/src/cmake_debug/CMakeCache.txt", cmake_header, cmake_cache)
+# print(cmake_cache)
+
+
+# ----------------------------------------------------------------------------
+# Main Functions (can be run from command line)
+
+def config_set(config_id, state):
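+    # Apply or revert a preset: when the checkbox is enabled its flags are
+    # added to the matching cache entries, otherwise they are removed.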
+    print(config_id, state.get(), dir(state))
+    cache = CMAKE_DATA["cmake_cache"]
+    build_type = cache["CMAKE_BUILD_TYPE"][1]  # value
+
+    cfg = PRESETS[config_id]
+    for key, (add, rem) in cfg.items():
+        key = cmake_flag_buildtype_suffix(key, build_type)
+        (cmake_type, cmake_value, cmake_descr, cmake_internal) = cache[key]
+        data = cmake_value.split()
+        if not state.get():
+            add, rem = rem, add
+
+        for arg in rem:
+            data[:] = [i for i in data if i != arg]
+        data.extend(add)
+
+        # print("A", data)
+        cmake_value = " ".join(data)
+        # print("B", cmake_value)
+        cache[key] = (cmake_type, cmake_value, cmake_descr, cmake_internal)
+        print("AFTER", cache[key])
+
+
+def config_check(config_id):
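+    # A preset is considered enabled when all of its 'add' flags are present
+    # in the current cache values.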
+    cache = CMAKE_DATA["cmake_cache"]
+    build_type = cache["CMAKE_BUILD_TYPE"][1]  # value
+
+    cfg = PRESETS[config_id]
+    for key, (add, rem) in cfg.items():
+        key = cmake_flag_buildtype_suffix(key, build_type)
+        (cmake_type, cmake_value, cmake_descr, cmake_internal) = cache[key]
+        data = cmake_value.split()
+        for arg in add:
+            if arg not in data:
+                return False
+    return True
+
+
+# ----------------------------------------------------------------------------
+# Command Line Interface
+
+# ----------------------------------------------------------------------------
+# User Interface
+if WITH_GUI:
+    CMAKE_DATA = {}
+    # CMAKE_CACHE = "/src/cmake_debugCMakeCache.txt"
+    CMAKE_CACHE = "CMakeCache.txt"
+
+    def cmake_read():
+        print("%s: reading..." % CMAKE_CACHE)
+        (CMAKE_DATA["cmake_header"],
+         CMAKE_DATA["cmake_cache"],
+         ) = cmakecache_to_py(CMAKE_CACHE)
+
+    def cmake_write():
+        print("%s: writing..." % CMAKE_CACHE)
+        CMAKE_CACHE_TMP = CMAKE_CACHE + "~"
+        cmakecache_from_py(CMAKE_CACHE_TMP,
+                           CMAKE_DATA["cmake_header"],
+                           CMAKE_DATA["cmake_cache"])
+
+        import shutil
+        shutil.move(CMAKE_CACHE_TMP, CMAKE_CACHE)
+
+    # read before load.
+    cmake_read()
+
+    import tkinter
+    master = tkinter.Tk()
+
+    row = 0
+    tkinter.Label(master, text="Flags:").grid(row=row, sticky=tkinter.W)
+    row += 1
+
+    def config_but(my_id):
+        global row
+        var = tkinter.IntVar()
+        var.set(config_check(my_id))
+        tkinter.Checkbutton(master,
+                            text=my_id.replace("_", " ").capitalize(),
+                            variable=var,
+                            command=lambda: config_set(my_id, var)).grid(row=row, sticky=tkinter.W)
+        row += 1
+
+    for my_id in sorted(PRESETS.keys()):
+        config_but(my_id)
+
+    tkinter.Button(master, text='Write', command=cmake_write).grid(row=row, sticky=tkinter.W, pady=4)
+    row += 1
+    tkinter.Button(master, text='Quit', command=master.quit).grid(row=row, sticky=tkinter.W, pady=4)
+    row += 1
+    # tkinter.Button(master, text='Show', command=var_states).grid(row=4, sticky=tkinter.W, pady=4)
+
+    tkinter.mainloop()
+
+# EOF
diff --git a/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py b/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
index effbbdea0c79b3324ab0c4a28de719b6b487aa2c..2c6fdb30ecd2c0973c80c64772e36d37164aadf2 100755
--- a/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
+++ b/utils_ide/qtcreator/externaltools/qtc_assembler_preview.py
@@ -76,6 +76,7 @@ def find_build_args_make(source):
     return find_arg(source, data)
 
 def main():
+    import re
 
     # currently only supports ninja or makefiles
     build_file_ninja = os.path.join(BUILD_DIR, "build.ninja")
@@ -108,27 +109,39 @@ def main():
         del arg_split[:i + 1] 
 
     if COMPILER_ID == 'GCC':
-        # remove arg pairs
-        for arg, n in (("-o", 2), ("-MF", 2), ("-MT", 2), ("-MMD", 1)):
-            if arg in arg_split:
-                i = arg_split.index(arg)
-                del arg_split[i : i + n]
-
         # --- Switch debug for optimized ---
-        for arg, n in (("-O0", 1),
-                       ("-g", 1), ("-g1", 1), ("-g2", 1), ("-g3", 1),
-                       ("-ggdb", 1), ("-ggdb", 1), ("-ggdb1", 1), ("-ggdb2", 1), ("-ggdb3", 1),
-                       ("-fno-inline", 1),
-                       ("-fno-builtin", 1),
-                       ("-fno-nonansi-builtins", 1),
-                       ("-fno-common", 1),
-                       ("-fsanitize=address", 1),
-                       ("-fsanitize=undefined", 1),
-                       ("-DDEBUG", 1), ("-D_DEBUG", 1),
-                       ):
-            if arg in arg_split:
-                i = arg_split.index(arg)
-                del arg_split[i : i + n]
+        for arg, n in (
+                # regular flags which prevent asm output
+                ("-o", 2),
+                ("-MF", 2),
+                ("-MT", 2),
+                ("-MMD", 1),
+
+                # debug flags
+                ("-O0", 1),
+                (re.compile(r"\-g\d*"), 1),
+                (re.compile(r"\-ggdb\d*"), 1),
+                ("-fno-inline", 1),
+                ("-fno-builtin", 1),
+                ("-fno-nonansi-builtins", 1),
+                ("-fno-common", 1),
+                ("-DDEBUG", 1), ("-D_DEBUG", 1),
+
+                # asan flags
+                (re.compile(r"\-fsanitize=.*"), 1),
+                ):
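+            # 'n' is the number of argv entries a flag spans
+            # (2 when its value follows as a separate argument)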
+            if isinstance(arg, str):
+                # exact string compare
+                while arg in arg_split:
+                    i = arg_split.index(arg)
+                    del arg_split[i : i + n]
+            else:
+                # regex match
+                for i in reversed(range(len(arg_split))):
+                    if arg.match(arg_split[i]):
+                        del arg_split[i : i + n]
 
         # add optimized args
         arg_split += ["-O3", "-fomit-frame-pointer", "-DNDEBUG", "-Wno-error"]