diff --git a/check_source/check_descriptions.py b/check_source/check_descriptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..35e986d25d7fd965714016551b800cdc69a816b0
--- /dev/null
+++ b/check_source/check_descriptions.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Campbell Barton
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+"""
+this script checks Blender's operator descriptions (tooltips) for duplicates:
+
+  ./blender.bin --background -noaudio --python source/tools/check_source/check_descriptions.py
+"""
+
+import bpy
+
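+# Each entry is an alphabetically sorted tuple of identifiers which are known
+# to share the same description on purpose; it is compared against the sorted
+# groups built by check_duplicates() below.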
+DUPLICATE_WHITELIST = (
+    # operators
+    ('ACTION_OT_clean', 'GRAPH_OT_clean'),
+    ('ACTION_OT_clickselect', 'GRAPH_OT_clickselect'),
+    ('ACTION_OT_copy', 'GRAPH_OT_copy'),
+    ('ACTION_OT_delete', 'GRAPH_OT_delete'),
+    ('ACTION_OT_duplicate', 'GRAPH_OT_duplicate'),
+    ('ACTION_OT_duplicate_move', 'GRAPH_OT_duplicate_move'),
+    ('ACTION_OT_extrapolation_type', 'GRAPH_OT_extrapolation_type'),
+    ('ACTION_OT_handle_type', 'GRAPH_OT_handle_type'),
+    ('ACTION_OT_interpolation_type', 'GRAPH_OT_interpolation_type'),
+    ('ACTION_OT_keyframe_insert', 'GRAPH_OT_keyframe_insert'),
+    ('ACTION_OT_mirror', 'GRAPH_OT_mirror'),
+    ('ACTION_OT_paste', 'GRAPH_OT_paste'),
+    ('ACTION_OT_sample', 'GRAPH_OT_sample'),
+    ('ACTION_OT_select_all_toggle', 'GRAPH_OT_select_all_toggle'),
+    ('ACTION_OT_select_border', 'GRAPH_OT_select_border'),
+    ('ACTION_OT_select_column', 'GRAPH_OT_select_column'),
+    ('ACTION_OT_select_leftright', 'GRAPH_OT_select_leftright'),
+    ('ACTION_OT_select_less', 'GRAPH_OT_select_less'),
+    ('ACTION_OT_select_linked', 'GRAPH_OT_select_linked'),
+    ('ACTION_OT_select_more', 'GRAPH_OT_select_more'),
+    ('ACTION_OT_view_all', 'CLIP_OT_dopesheet_view_all', 'GRAPH_OT_view_all'),
+    ('ANIM_OT_change_frame', 'CLIP_OT_change_frame'),
+    ('ARMATURE_OT_armature_layers', 'POSE_OT_armature_layers'),
+    ('ARMATURE_OT_autoside_names', 'POSE_OT_autoside_names'),
+    ('ARMATURE_OT_bone_layers', 'POSE_OT_bone_layers'),
+    ('ARMATURE_OT_extrude_forked', 'ARMATURE_OT_extrude_move'),
+    ('ARMATURE_OT_select_all', 'POSE_OT_select_all'),
+    ('ARMATURE_OT_select_hierarchy', 'POSE_OT_select_hierarchy'),
+    ('ARMATURE_OT_select_linked', 'POSE_OT_select_linked'),
+    ('CLIP_OT_cursor_set', 'UV_OT_cursor_set'),
+    ('CLIP_OT_disable_markers', 'CLIP_OT_graph_disable_markers'),
+    ('CLIP_OT_graph_select_border', 'MASK_OT_select_border'),
+    ('CLIP_OT_view_ndof', 'IMAGE_OT_view_ndof'),
+    ('CLIP_OT_view_pan', 'IMAGE_OT_view_pan', 'VIEW2D_OT_pan', 'VIEW3D_OT_view_pan'),
+    ('CLIP_OT_view_zoom', 'VIEW2D_OT_zoom'),
+    ('CLIP_OT_view_zoom_in', 'VIEW2D_OT_zoom_in'),
+    ('CLIP_OT_view_zoom_out', 'VIEW2D_OT_zoom_out'),
+    ('CONSOLE_OT_copy', 'FONT_OT_text_copy', 'TEXT_OT_copy'),
+    ('CONSOLE_OT_delete', 'FONT_OT_delete', 'TEXT_OT_delete'),
+    ('CONSOLE_OT_insert', 'FONT_OT_text_insert', 'TEXT_OT_insert'),
+    ('CONSOLE_OT_paste', 'FONT_OT_text_paste', 'TEXT_OT_paste'),
+    ('CURVE_OT_duplicate', 'MASK_OT_duplicate'),
+    ('CURVE_OT_handle_type_set', 'MASK_OT_handle_type_set'),
+    ('CURVE_OT_switch_direction', 'MASK_OT_switch_direction'),
+    ('FONT_OT_line_break', 'TEXT_OT_line_break'),
+    ('FONT_OT_move', 'TEXT_OT_move'),
+    ('FONT_OT_move_select', 'TEXT_OT_move_select'),
+    ('FONT_OT_text_cut', 'TEXT_OT_cut'),
+    ('GRAPH_OT_properties', 'IMAGE_OT_properties', 'LOGIC_OT_properties', 'NLA_OT_properties'),
+    ('LATTICE_OT_select_ungrouped', 'MESH_OT_select_ungrouped', 'PAINT_OT_vert_select_ungrouped'),
+    ('NODE_OT_add_node', 'NODE_OT_add_search'),
+    ('NODE_OT_move_detach_links', 'NODE_OT_move_detach_links_release'),
+    ('NODE_OT_properties', 'VIEW3D_OT_properties'),
+    ('NODE_OT_toolbar', 'VIEW3D_OT_toolshelf'),
+    ('OBJECT_OT_duplicate_move', 'OBJECT_OT_duplicate_move_linked'),
+    ('WM_OT_context_cycle_enum', 'WM_OT_context_toggle', 'WM_OT_context_toggle_enum'),
+    ('WM_OT_context_set_boolean', 'WM_OT_context_set_enum', 'WM_OT_context_set_float', 'WM_OT_context_set_int', 'WM_OT_context_set_string', 'WM_OT_context_set_value'),
+    )
+
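+# descriptions to skip entirely (empty descriptions are common,
+# so comparing them is not useful)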
+DUPLICATE_IGNORE = {
+    "",
+    }
+
+
+def check_duplicates():
+    import rna_info
+
+    DUPLICATE_IGNORE_FOUND = set()
+    DUPLICATE_WHITELIST_FOUND = set()
+
+    structs, funcs, ops, props = rna_info.BuildRNAInfo()
+
+    # This is mainly useful for operators,
+    # other types have too many false positives
+
+    #for t in (structs, funcs, ops, props):
+    for t in (ops, ):
+        description_dict = {}
+        print("")
+        for k, v in t.items():
+            if v.description not in DUPLICATE_IGNORE:
+                id_str = ".".join([s if isinstance(s, str) else s.identifier for s in k if s])
+                description_dict.setdefault(v.description, []).append(id_str)
+            else:
+                DUPLICATE_IGNORE_FOUND.add(v.description)
+        # sort for easier viewing
+        sort_ls = [(tuple(sorted(v)), k) for k, v in description_dict.items()]
+        sort_ls.sort()
+
+        for v, k in sort_ls:
+            if len(v) > 1:
+                if v not in DUPLICATE_WHITELIST:
+                    print("found %d: %r, \"%s\"" % (len(v), v, k))
+                    #print("%r," % (v,))
+                else:
+                    DUPLICATE_WHITELIST_FOUND.add(v)
+
+    test = (DUPLICATE_IGNORE - DUPLICATE_IGNORE_FOUND)
+    if test:
+        print("Invalid 'DUPLICATE_IGNORE': %r" % test)
+    test = (set(DUPLICATE_WHITELIST) - DUPLICATE_WHITELIST_FOUND)
+    if test:
+        print("Invalid 'DUPLICATE_WHITELIST': %r" % test)
+
+def main():
+    check_duplicates()
+
+if __name__ == "__main__":
+    main()
diff --git a/check_source/check_spelling_c.py b/check_source/check_spelling_c.py
new file mode 100755
index 0000000000000000000000000000000000000000..d6beec26dbb7b8ae37bc122c8937ba24c81aecb9
--- /dev/null
+++ b/check_source/check_spelling_c.py
@@ -0,0 +1,356 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""
+Script for checking source code spelling.
+
+   python3 source/tools/check_source/check_spelling_c.py some_source_file.py
+
+Both Python and C/C++ sources are supported;
+the parser is chosen by file extension.
+"""
+
+import os
+PRINT_QTC_TASKFORMAT = False
+if "USE_QTC_TASK" in os.environ:
+    PRINT_QTC_TASKFORMAT = True
+
+ONLY_ONCE = True
+USE_COLOR = True
+_only_once_ids = set()
+
+if USE_COLOR:
+    COLOR_WORD = "\033[92m"
+    COLOR_ENDC = "\033[0m"
+else:
+    COLOR_FAIL = ""
+    COLOR_ENDC = ""
+
+
+import enchant
+dict_spelling = enchant.Dict("en_US")
+
+from check_spelling_c_config import (dict_custom,
+                                     dict_ignore,
+                                     )
+
+
+def words_from_text(text):
+    """ Extract words to treat as English for spell checking.
+    """
+    text = text.strip("#'\"")
+    text = text.replace("/", " ")
+    text = text.replace("-", " ")
+    text = text.replace("+", " ")
+    text = text.replace("%", " ")
+    text = text.replace(",", " ")
+    text = text.replace("=", " ")
+    text = text.replace("|", " ")
+    words = text.split()
+
+    # filter words
+    words[:] = [w.strip("*?!:;.,'\"`") for w in words]
+
+    def word_ok(w):
+        # check for empty string
+        if not w:
+            return False
+
+        # ignore all uppercase words
+        if w.isupper():
+            return False
+
+        # check for string with no characters in it
+        is_alpha = False
+        for c in w:
+            if c.isalpha():
+                is_alpha = True
+                break
+        if not is_alpha:
+            return False
+
+        # check for prefix/suffix which render this not a real word
+        # example '--debug', '\n'
+        # TODO, add more
+        if w[0] in "%-+\\@":
+            return False
+
+        # check for code in comments
+        for c in "<>{}[]():._0123456789\&*":
+            if c in w:
+                return False
+
+        # ignore mixed-case words where upper case characters appear
+        # after the first character, e.g. 'StructRNA'
+        if len(w) > 1:
+            has_lower = False
+            for c in w:
+                if c.islower():
+                    has_lower = True
+                    break
+            if has_lower and (not w[1:].islower()):
+                return False
+
+        return True
+    words[:] = [w for w in words if word_ok(w)]
+
+    # text = " ".join(words)
+
+    # print(text)
+    return words
+
+
+class Comment:
+    __slots__ = ("file",
+                 "text",
+                 "line",
+                 "type",
+                 )
+
+    def __init__(self, file, text, line, type):
+        self.file = file
+        self.text = text
+        self.line = line
+        self.type = type
+
+    def parse(self):
+        return words_from_text(self.text)
+
+
+def extract_py_comments(filepath):
+
+    import token
+    import tokenize
+
+    source = open(filepath, encoding='utf-8')
+
+    comments = []
+
+    prev_toktype = token.INDENT
+
+    tokgen = tokenize.generate_tokens(source.readline)
+    for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
+        if toktype == token.STRING and prev_toktype == token.INDENT:
+            comments.append(Comment(filepath, ttext, slineno, 'DOCSTRING'))
+        elif toktype == tokenize.COMMENT:
+            # non standard hint for commented CODE that we can ignore
+            if not ttext.startswith("#~"):
+                comments.append(Comment(filepath, ttext, slineno, 'COMMENT'))
+        prev_toktype = toktype
+    source.close()
+    return comments
+
+
+def extract_c_comments(filepath):
+    """
+    Extracts comments like this:
+
+        /*
+         * This is a multi-line comment, notice the '*'s are aligned.
+         */
+    """
+    i = 0
+    text = open(filepath, encoding='utf-8').read()
+
+    BEGIN = "/*"
+    END = "*/"
+    TABSIZE = 4
+    SINGLE_LINE = False
+    STRIP_DOXY = True
+    STRIP_DOXY_DIRECTIVES = (
+        r"\section",
+        r"\subsection",
+        r"\subsubsection",
+        r"\ingroup",
+        r"\param",
+        r"\page",
+        )
+    SKIP_COMMENTS = (
+        "BEGIN GPL LICENSE BLOCK",
+        )
+
+    # http://doc.qt.nokia.com/qtcreator-2.4/creator-task-lists.html#task-list-file-format
+    # file\tline\ttype\tdescription
+    # ... > foobar.tasks
+
+    # reverse these to find blocks we won't parse
+    PRINT_NON_ALIGNED = False
+    PRINT_SPELLING = True
+
+    def strip_doxy_comments(block_split):
+
+        for i, l in enumerate(block_split):
+            for directive in STRIP_DOXY_DIRECTIVES:
+                if directive in l:
+                    l_split = l.split()
+                    l_split[l_split.index(directive) + 1] = " "
+                    l = " ".join(l_split)
+                    del l_split
+                    break
+            block_split[i] = l
+
+    comments = []
+
+    while i >= 0:
+        i = text.find(BEGIN, i)
+        if i != -1:
+            i_next = text.find(END, i)
+            if i_next != -1:
+
+                # not essential, but seek back to find the beginning of the line
+                while i > 0 and text[i - 1] in {"\t", " "}:
+                    i -= 1
+
+                block = text[i:i_next + len(END)]
+
+                # add whitespace in front of the block (for alignment test)
+                ws = []
+                j = i
+                while j > 0 and text[j - 1] != "\n":
+                    ws.append("\t" if text[j - 1] == "\t" else " ")
+                    j -= 1
+                ws.reverse()
+                block = "".join(ws) + block
+
+                ok = True
+
+                if not (SINGLE_LINE or ("\n" in block)):
+                    ok = False
+
+                if ok:
+                    for c in SKIP_COMMENTS:
+                        if c in block:
+                            ok = False
+                            break
+
+                if ok:
+                    # expand tabs
+                    block_split = [l.expandtabs(TABSIZE) for l in block.split("\n")]
+
+                    # now validate that the block is aligned
+                    align_vals = tuple(sorted(set([l.find("*") for l in block_split])))
+                    is_aligned = len(align_vals) == 1
+
+                    if is_aligned:
+                        if PRINT_SPELLING:
+                            if STRIP_DOXY:
+                                strip_doxy_comments(block_split)
+
+                            align = align_vals[0] + 1
+                            block = "\n".join([l[align:] for l in block_split])[:-len(END)]
+
+                            # now strip block and get text
+                            # print(block)
+
+                            # ugh - not nice or fast
+                            slineno = 1 + text.count("\n", 0, i)
+
+                            comments.append(Comment(filepath, block, slineno, 'COMMENT'))
+                    else:
+                        if PRINT_NON_ALIGNED:
+                            lineno = 1 + text.count("\n", 0, i)
+                            if PRINT_QTC_TASKFORMAT:
+                                print("%s\t%d\t%s\t%s" % (filepath, lineno, "comment", align_vals))
+                            else:
+                                print(filepath + ":" + str(lineno) + ":")
+
+            i = i_next
+
+    return comments
+
+
+def spell_check_comments(filepath):
+
+    if filepath.endswith(".py"):
+        comment_list = extract_py_comments(filepath)
+    else:
+        comment_list = extract_c_comments(filepath)
+
+    for comment in comment_list:
+        for w in comment.parse():
+            #if len(w) < 15:
+            #    continue
+
+            w_lower = w.lower()
+            if w_lower in dict_custom or w_lower in dict_ignore:
+                continue
+
+            if not dict_spelling.check(w):
+
+                if ONLY_ONCE:
+                    if w_lower in _only_once_ids:
+                        continue
+                    else:
+                        _only_once_ids.add(w_lower)
+
+                if PRINT_QTC_TASKFORMAT:
+                    print("%s\t%d\t%s\t%s, suggest (%s)" %
+                          (comment.file,
+                           comment.line,
+                           "comment",
+                           w,
+                           " ".join(dict_spelling.suggest(w)),
+                           ))
+                else:
+                    print("%s:%d: %s%s%s, suggest (%s)" %
+                          (comment.file,
+                           comment.line,
+                           COLOR_WORD,
+                           w,
+                           COLOR_ENDC,
+                           " ".join(dict_spelling.suggest(w)),
+                           ))
+
+
+def spell_check_comments_recursive(dirpath):
+    from os.path import join, splitext
+
+    def source_list(path, filename_check=None):
+        for dirpath, dirnames, filenames in os.walk(path):
+
+            # prune hidden directories such as '.svn'
+            dirnames[:] = [d for d in dirnames if not d.startswith(".")]
+
+            for filename in filenames:
+                filepath = join(dirpath, filename)
+                if filename_check is None or filename_check(filepath):
+                    yield filepath
+
+    def is_source(filename):
+        ext = splitext(filename)[1]
+        return (ext in {".c", ".inl", ".cpp", ".cxx", ".hpp", ".hxx", ".h", ".osl", ".py"})
+
+    for filepath in source_list(dirpath, is_source):
+        spell_check_comments(filepath)
+
+
+import sys
+
+if __name__ == "__main__":
+    for filepath in sys.argv[1:]:
+        if os.path.isdir(filepath):
+            # recursive search
+            spell_check_comments_recursive(filepath)
+        else:
+            # single file
+            spell_check_comments(filepath)
diff --git a/check_source/check_spelling_c_config.py b/check_source/check_spelling_c_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a051e5561c59fbaae58cf3982112f64e424dfae5
--- /dev/null
+++ b/check_source/check_spelling_c_config.py
@@ -0,0 +1,221 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# these must be all lower case for comparisons
+
+# correct spelling but ignore
+dict_custom = {
+    "instantiation",
+    "iterable",
+    "prepend",
+    "subclass", "subclasses", "subclassing",
+    "merchantability",
+    "precalculate",
+    "unregister",
+    "unselected",
+    "subdirectory",
+    "decrement",
+    "boolean",
+    "decrementing",
+
+    # python types
+    "str",
+    "enum", "enums",
+    "int", "ints",
+    "tuple", "tuples",
+
+    # python functions
+    "repr",
+    "func",
+
+    # accepted abbreviations
+    "config",
+    "recalc",
+    "addon", "addons",
+    "subdir",
+    "struct", "structs",
+    "lookup", "lookups",
+    "autocomplete",
+    "namespace",
+    "multi",
+    "keyframe", "keyframing",
+    "coord", "coords",
+    "dir",
+    "tooltip",
+
+    # general computer terms
+    "endian",
+    "contructor",
+    "unicode",
+    "jitter",
+    "quantized",
+    "searchable",
+    "metadata",
+    "hashable",
+    "stdin",
+    "stdout",
+    "stdin",
+    "opengl",
+    "boids",
+    "keymap",
+    "voxel", "voxels",
+    "vert", "verts",
+    "euler", "eulers",
+    "booleans",
+    "intrinsics",
+    "XXX",
+    "segfault",
+    "wiki",
+    "foo",
+    "diff",
+    "diffs",
+    "sudo",
+    "http",
+    "url",
+    "usr",
+    "env",
+    "app",
+    "preprocessor",
+
+    # specific computer terms/brands
+    "posix",
+    "unix",
+    "amiga",
+    "netscape",
+    "mozilla",
+    "irix",
+    "kde",
+    "qtcreator",
+    "ack",
+
+    # general computer graphics terms
+    "colinear",
+    "coplanar",
+    "barycentric",
+    "bezier",
+    "fresnel",
+    "radiosity",
+    "reflectance",
+    "specular",
+    "nurbs",
+    "ngon", "ngons",
+    "bicubic",
+    "compositing",
+    "deinterlace",
+    "shader",
+    "shaders",
+    "centroid",
+    "emissive",
+    "quaternions",
+    "lacunarity",
+    "musgrave",
+    "normals",
+    "kerning",
+
+    # blender terms
+    "bmain",
+    "bmesh",
+    "bpy",
+    "bge",
+    "mathutils",
+    "fcurve",
+    "animviz",
+    "animsys",
+    "eekadoodle",
+    "editmode",
+    "obdata",
+    "doctree",
+
+    # should have apostrophe but ignore for now
+    # unless we want to get really picky!
+    "indices",
+    "vertices",
+}
+
+# incorrect spelling but ignore anyway
+dict_ignore = {
+    "tri",
+    "quad",
+    "eg",
+    "ok",
+    "ui",
+    "uv",
+    "arg", "args",
+    "vec",
+    "loc",
+    "dof",
+    "bool",
+    "dupli",
+    "readonly",
+    "filepath",
+    "filepaths",
+    "filename", "filenames",
+    "submodule", "submodules",
+    "dirpath",
+    "x-axis",
+    "y-axis",
+    "z-axis",
+    "a-z",
+    "id-block",
+    "node-trees",
+    "pyflakes",
+    "pylint",
+
+    # acronyms
+    "cpu",
+    "gpu",
+    "nan",
+    "utf",
+    "rgb",
+    "gzip",
+    "ppc",
+    "gpl",
+    "rna",
+    "nla",
+    "api",
+    "rhs",
+    "lhs",
+    "ik",
+    "smpte",
+    "svn",
+    "hg",
+    "gl",
+
+    # extensions
+    "xpm",
+    "xml",
+    "py",
+    "rst",
+
+    # tags
+    "fixme",
+    "todo",
+
+    # sphinx/rst
+    "rtype",
+
+    # slang
+    "hrmf",
+    "automagically",
+
+    # names
+    "jahka",
+    "campbell",
+    "mikkelsen", "morten",
+}
diff --git a/check_source/check_style_c.py b/check_source/check_style_c.py
new file mode 100755
index 0000000000000000000000000000000000000000..887c9dd6bdbb43571090dd9ea71de7b61463931c
--- /dev/null
+++ b/check_source/check_style_c.py
@@ -0,0 +1,1091 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contributor(s): Campbell Barton
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+"""
+This script runs outside of Blender and scans C/C++ source for style issues.
+
+   python3 source/tools/check_source/check_style_c.py source/
+"""
+
+import os
+
+from check_style_c_config import IGNORE, IGNORE_DIR, SOURCE_DIR
+IGNORE = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE])
+IGNORE_DIR = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE_DIR])
+WARN_TEXT = False
+
+
+def is_ignore(f):
+    for ig in IGNORE:
+        if f == ig:
+            return True
+    for ig in IGNORE_DIR:
+        if f.startswith(ig):
+            return True
+    return False
+
+print("Scanning:", SOURCE_DIR)
+
+# TODO
+#
+# Add checks for:
+# - macro brace use
+# - line length - in a not-too-annoying way
+#   (allow for long arrays in struct definitions, PyMethodDef for eg)
+
+from pygments import lex  # highlight
+from pygments.lexers import CLexer
+from pygments.formatters import RawTokenFormatter
+
+from pygments.token import Token
+
+import argparse
+
+PRINT_QTC_TASKFORMAT = False
+if "USE_QTC_TASK" in os.environ:
+    PRINT_QTC_TASKFORMAT = True
+
+TAB_SIZE = 4
+LIN_SIZE = 120
+
+filepath = ""
+tokens = []
+
+
+# could store index here too, then have prev/next methods
+class TokStore:
+    __slots__ = ("type", "text", "line")
+
+    def __init__(self, type, text, line):
+        self.type = type
+        self.text = text
+        self.line = line
+
+
+def tk_range_to_str(a, b, expand_tabs=False):
+    txt = "".join([tokens[i].text for i in range(a, b + 1)])
+    if expand_tabs:
+        txt = txt.expandtabs(TAB_SIZE)
+    return txt
+
+
+def tk_item_is_newline(tok):
+    return tok.type == Token.Text and tok.text.strip("\t ") == "\n"
+
+
+def tk_item_is_ws_newline(tok):
+    return (tok.text == "") or \
+           (tok.type == Token.Text and tok.text.isspace()) or \
+           (tok.type in Token.Comment)
+
+
+def tk_item_is_ws(tok):
+    return (tok.text == "") or \
+           (tok.type == Token.Text and tok.text.strip("\t ") != "\n" and tok.text.isspace()) or \
+           (tok.type in Token.Comment)
+
+
+# also skips comments
+def tk_advance_ws(index, direction):
+    while tk_item_is_ws(tokens[index + direction]) and index > 0:
+        index += direction
+    return index
+
+
+def tk_advance_no_ws(index, direction):
+    index += direction
+    while tk_item_is_ws(tokens[index]) and index > 0:
+        index += direction
+    return index
+
+
+def tk_advance_ws_newline(index, direction):
+    while tk_item_is_ws_newline(tokens[index + direction]) and index > 0:
+        index += direction
+    return index + direction
+
+
+def tk_advance_line_start(index):
+    """ Go the the first non-whitespace token of the line.
+    """
+    while tokens[index].line == tokens[index - 1].line and index > 0:
+        index -= 1
+    return tk_advance_no_ws(index, 1)
+
+
+def tk_advance_line(index, direction):
+    line = tokens[index].line
+    while tokens[index + direction].line == line or tokens[index].text == "\n":
+        index += direction
+    return index
+
+
+def tk_match_bracket(index):
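+    """ Return the index of the token whose bracket matches the bracket
+        at *index* (scans forwards or backwards depending on the bracket).
+    """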
+    bracket_start = tokens[index].text
+    assert(tokens[index].type == Token.Punctuation)
+    assert(bracket_start in "[]{}()")
+
+    if tokens[index].text in "({[":
+        direction = 1
+        bracket_end = {"(": ")", "[": "]", "{": "}"}[bracket_start]
+    else:
+        direction = -1
+        bracket_end = {")": "(", "]": "[", "}": "{"}[bracket_start]
+
+    level = 1
+    index_match = index + direction
+    while True:
+        item = tokens[index_match]
+        if item.type == Token.Punctuation:
+            if item.text == bracket_start:
+                level += 1
+            elif item.text == bracket_end:
+                level -= 1
+                if level == 0:
+                    break
+
+        index_match += direction
+
+    return index_match
+
+
+def tk_index_is_linestart(index):
+    index_prev = tk_advance_ws_newline(index, -1)
+    return tokens[index_prev].line < tokens[index].line
+
+
+def extract_to_linestart(index):
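+    """ Return the text on the token's line which comes before the token.
+    """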
+    ls = []
+    line = tokens[index].line
+    index -= 1
+    while index > 0 and tokens[index].line == line:
+        ls.append(tokens[index].text)
+        index -= 1
+
+    if index != 0:
+        ls.append(tokens[index].text.rsplit("\n", 1)[1])
+
+    ls.reverse()
+    return "".join(ls)
+
+
+def extract_statement_if(index_kw):
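+    """ Return a (start, end) pair of token indices around the keyword and
+        its '(...)' expression, or None when it can't be extracted.
+    """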
+    # assert(tokens[index_kw].text == "if")
+
+    # seek back
+    i = index_kw
+
+    i_start = tk_advance_ws(index_kw - 1, direction=-1)
+
+    # seek forward
+    i_next = tk_advance_ws_newline(index_kw, direction=1)
+
+    # print(tokens[i_next])
+
+    # ignore preprocessor
+    i_linestart = tk_advance_line_start(index_kw)
+    if tokens[i_linestart].text.startswith("#"):
+        return None
+
+    if tokens[i_next].type != Token.Punctuation or tokens[i_next].text != "(":
+        warning("no '(' after '%s'" % tokens[index_kw].text, i_start, i_next)
+        return None
+
+    i_end = tk_match_bracket(i_next)
+
+    return (i_start, i_end)
+
+
+def extract_operator(index_op):
+    op_text = ""
+    i = 0
+    while tokens[index_op + i].type == Token.Operator:
+        op_text += tokens[index_op + i].text
+        i += 1
+    return op_text, index_op + (i - 1)
+
+
+def extract_cast(index):
+    # to detect a cast is quite involved... sigh
+    # assert(tokens[index].text == "(")
+
+    # TODO: comments within the cast, but that's rare
+    i_start = index
+    i_end = tk_match_bracket(index)
+
+    # first check we are not '()'
+    if i_start + 1 == i_end:
+        return None
+
+    # check we have punctuation before the cast
+    i = i_start - 1
+    while tokens[i].text.isspace():
+        i -= 1
+    i_prev_no_ws = i
+    if tokens[i].type in {Token.Keyword, Token.Name}:
+        # avoids  'foo(bar)test'
+        # but not ' = (bar)test'
+        return None
+
+    # validate types
+    tokens_cast = [tokens[i] for i in range(i_start + 1, i_end)]
+    for t in tokens_cast:
+        if t.type == Token.Keyword:
+            return None
+        elif t.type == Token.Operator and t.text != "*":
+            # prevent '(a + b)'
+            # note, we could have '(float(*)[1+2])' but this is unlikely
+            return None
+        elif t.type == Token.Punctuation and t.text not in '()[]':
+            # prevent '(a, b)'
+            return None
+    tokens_cast_strip = []
+    for t in tokens_cast:
+        if t.type in Token.Comment:
+            pass
+        elif t.type == Token.Text and t.text.isspace():
+            pass
+        else:
+            tokens_cast_strip.append(t)
+    # check token order and types
+    if not tokens_cast_strip:
+        return None
+    if tokens_cast_strip[0].type not in {Token.Name, Token.Type, Token.Keyword.Type}:
+        return None
+    t_prev = None
+    for t in tokens_cast_strip[1:]:
+        # prevent identifiers after the first: '(a b)'
+        if t.type in {Token.Keyword.Type, Token.Name, Token.Text}:
+            return None
+        # prevent: '(a * 4)'
+        # allow:   '(a (*)[4])'
+        if t_prev is not None and t_prev.text == "*" and t.type != Token.Punctuation:
+            return None
+        t_prev = t
+    del t_prev
+
+    # debug only
+    '''
+    string = "".join(tokens[i].text for i in range(i_start, i_end + 1))
+    #string = "".join(tokens[i].text for i in range(i_start + 1, i_end))
+    #types = [tokens[i].type for i in range(i_start + 1, i_end)]
+    types = [t.type for t in tokens_cast_strip]
+
+    print("STRING:", string)
+    print("TYPES: ", types)
+    print()
+    '''
+
+    return (i_start, i_end)
+
+
+def warning(message, index_kw_start, index_kw_end):
+    if PRINT_QTC_TASKFORMAT:
+        print("%s\t%d\t%s\t%s" % (filepath, tokens[index_kw_start].line, "comment", message))
+    else:
+        print("%s:%d: warning: %s" % (filepath, tokens[index_kw_start].line, message))
+        if WARN_TEXT:
+            print(tk_range_to_str(index_kw_start, index_kw_end, expand_tabs=True))
+
+
+def warning_lineonly(message, line):
+    if PRINT_QTC_TASKFORMAT:
+        print("%s\t%d\t%s\t%s" % (filepath, line, "comment", message))
+    else:
+        print("%s:%d: warning: %s" % (filepath, line, message))
+
+    # print(tk_range_to_str(index_kw_start, index_kw_end))
+
+
+# ------------------------------------------------------------------
+# Own Blender rules here!
+
+def blender_check_kw_if(index_kw_start, index_kw, index_kw_end):
+
+    # check if we have: 'if('
+    if not tk_item_is_ws(tokens[index_kw + 1]):
+        warning("no white space between '%s('" % tokens[index_kw].text, index_kw_start, index_kw_end)
+
+    # check for: ){
+    index_next = tk_advance_ws_newline(index_kw_end, 1)
+    if tokens[index_next].type == Token.Punctuation and tokens[index_next].text == "{":
+        if not tk_item_is_ws(tokens[index_next - 1]):
+            warning("no white space between trailing bracket '%s (){'" % tokens[index_kw].text, index_kw_start, index_kw_end)
+
+        # check for: if ()
+        #            {
+        # note: if the if statement is multi-line we allow it
+        if     ((tokens[index_kw].line == tokens[index_kw_end].line) and
+                (tokens[index_kw].line == tokens[index_next].line - 1)):
+
+            warning("if body brace on a new line '%s ()\\n{'" % tokens[index_kw].text, index_kw, index_kw_end)
+    else:
+        # no '{' on a multi-line if
+        if tokens[index_kw].line != tokens[index_kw_end].line:
+            warning("multi-line if should use a brace '%s (\\n\\n) statement;'" % tokens[index_kw].text, index_kw, index_kw_end)
+
+    # check for: if (a &&
+    #                b) { ...
+    # brace should be on a newline.
+    if (tokens[index_kw].line != tokens[index_kw_end].line):
+        if tokens[index_kw_end].line == tokens[index_next].line:
+            warning("multi-line should use a on a new line '%s (\\n\\n) {'" % tokens[index_kw].text, index_kw, index_kw_end)
+
+    # check for: if () { ... };
+    #
+    # no need to have semicolon after brace.
+    if tokens[index_next].text == "{":
+        index_final = tk_match_bracket(index_next)
+        index_final_step = tk_advance_no_ws(index_final, 1)
+        if tokens[index_final_step].text == ";":
+            warning("semi-colon after brace '%s () { ... };'" % tokens[index_kw].text, index_final_step, index_final_step)
+
+
+def blender_check_kw_else(index_kw):
+    # for 'else if' use the if check.
+    i_next = tk_advance_ws_newline(index_kw, 1)
+
+    # check there is at least one space between:
+    # else{
+    if index_kw + 1 == i_next:
+        warning("else has no space between following brace 'else{'", index_kw, i_next)
+
+    # check if there are more than 1 spaces after else, but nothing after the following brace
+    # else     {
+    #     ...
+    #
+    # check for this case since this is needed sometimes:
+    # else     { a = 1; }
+    if     ((tokens[index_kw].line == tokens[i_next].line) and
+            (tokens[index_kw + 1].type == Token.Text) and
+            (len(tokens[index_kw + 1].text) > 1) and
+            (tokens[index_kw + 1].text.isspace())):
+
+        # check if the next data after { is on a newline
+        i_next_next = tk_advance_ws_newline(i_next, 1)
+        if tokens[i_next].line != tokens[i_next_next].line:
+            warning("unneeded whitespace before brace 'else ... {'", index_kw, i_next)
+
+    # this check only tests for:
+    # else
+    # {
+    # ... which is never OK
+    #
+    # ... except if you have
+    # else
+    # #preprocessor
+    # {
+
+    if tokens[i_next].type == Token.Punctuation and tokens[i_next].text == "{":
+        if tokens[index_kw].line < tokens[i_next].line:
+            # check for preproc
+            i_newline = tk_advance_line(index_kw, 1)
+            if tokens[i_newline].text.startswith("#"):
+                pass
+            else:
+                warning("else body brace on a new line 'else\\n{'", index_kw, i_next)
+
+    # this check only tests for:
+    # else
+    # if
+    # ... which is never OK
+    if tokens[i_next].type == Token.Keyword and tokens[i_next].text == "if":
+        if tokens[index_kw].line < tokens[i_next].line:
+            warning("else if is split by a new line 'else\\nif'", index_kw, i_next)
+
+    # check
+    # } else
+    # ... which is never OK
+    i_prev = tk_advance_no_ws(index_kw, -1)
+    if tokens[i_prev].type == Token.Punctuation and tokens[i_prev].text == "}":
+        if tokens[index_kw].line == tokens[i_prev].line:
+            warning("else has no newline before the brace '} else'", i_prev, index_kw)
+
+
+def blender_check_kw_switch(index_kw_start, index_kw, index_kw_end):
+    # In this function we check the body of the switch
+
+    # switch (value) {
+    # ...
+    # }
+
+    # assert(tokens[index_kw].text == "switch")
+
+    index_next = tk_advance_ws_newline(index_kw_end, 1)
+
+    if tokens[index_next].type == Token.Punctuation and tokens[index_next].text == "{":
+        ws_switch_indent = extract_to_linestart(index_kw)
+
+        if ws_switch_indent.isspace():
+
+            # 'case' should have at least 1 indent.
+            # otherwise expect 2 indent (or more, for nested switches)
+            ws_test = {
+                "case": ws_switch_indent + "\t",
+                "default:": ws_switch_indent + "\t",
+
+                "break": ws_switch_indent + "\t\t",
+                "return": ws_switch_indent + "\t\t",
+                "continue": ws_switch_indent + "\t\t",
+                "goto": ws_switch_indent + "\t\t",
+                }
+
+            index_final = tk_match_bracket(index_next)
+
+            case_ls = []
+
+            for i in range(index_next + 1, index_final):
+                # 'default' is seen as a label
+                # print(tokens[i].type, tokens[i].text)
+                if tokens[i].type in {Token.Keyword, Token.Name.Label}:
+                    if tokens[i].text in {"case", "default:", "break", "return", "comtinue", "goto"}:
+                        ws_other_indent = extract_to_linestart(i)
+                        # non ws start - we ignore for now, allow case A: case B: ...
+                        if ws_other_indent.isspace():
+                            ws_test_other = ws_test[tokens[i].text]
+                            if not ws_other_indent.startswith(ws_test_other):
+                                warning("%s is not indented enough" % tokens[i].text, i, i)
+
+                            # assumes correct indentation...
+                            if tokens[i].text in {"case", "default:"}:
+                                if ws_other_indent == ws_test_other:
+                                    case_ls.append(i)
+
+            case_ls.append(index_final - 1)
+
+            # detect correct use of break/return
+            for j in range(len(case_ls) - 1):
+                i_case = case_ls[j]
+                i_end = case_ls[j + 1]
+
+                # detect cascading cases, check there is at least one line in between
+                if tokens[i_case].line + 1 < tokens[i_end].line:
+                    ok = False
+
+                    # scan case body backwards
+                    for i in reversed(range(i_case, i_end)):
+                        if tokens[i].type == Token.Punctuation:
+                            if tokens[i].text == "}":
+                                ws_other_indent = extract_to_linestart(i)
+                                if ws_other_indent != ws_test["case"]:
+                                    # break/return _not_ found
+                                    break
+
+                        elif tokens[i].type in Token.Comment:
+                            if tokens[i].text == "/* fall-through */":
+                                ok = True
+                                break
+                            else:
+                                #~ print("Commment '%s'" % tokens[i].text)
+                                pass
+
+                        elif tokens[i].type == Token.Keyword:
+                            if tokens[i].text in {"break", "return", "continue", "goto"}:
+                                if tokens[i_case].line == tokens[i].line:
+                                    # Allow for...
+                                    #     case BLAH: var = 1; break;
+                                    # ... possible there is if statements etc, but assume not
+                                    ok = True
+                                    break
+                                else:
+                                    ws_other_indent = extract_to_linestart(i)
+                                    ws_other_indent = ws_other_indent[:len(ws_other_indent) - len(ws_other_indent.lstrip())]
+                                    ws_test_other = ws_test[tokens[i].text]
+                                    if ws_other_indent == ws_test_other:
+                                        ok = True
+                                        break
+                                    else:
+                                        pass
+                                        #~ print("indent mismatch...")
+                                        #~ print("'%s'" % ws_other_indent)
+                                        #~ print("'%s'" % ws_test_other)
+                    if not ok:
+                        warning("case/default statement has no break", i_case, i_end)
+                        #~ print(tk_range_to_str(i_case - 1, i_end - 1, expand_tabs=True))
+        else:
+            warning("switch isn't the first token in the line", index_kw_start, index_kw_end)
+    else:
+        warning("switch brace missing", index_kw_start, index_kw_end)
+
+
+def blender_check_kw_sizeof(index_kw):
+    if tokens[index_kw + 1].text != "(":
+        warning("expected '%s('" % tokens[index_kw].text, index_kw, index_kw + 1)
+
+
+def blender_check_cast(index_kw_start, index_kw_end):
+    # detect: '( float...'
+    if tokens[index_kw_start + 1].text.isspace():
+        warning("cast has space after first bracket '( type...'", index_kw_start, index_kw_end)
+    # detect: '...float )'
+    if tokens[index_kw_end - 1].text.isspace():
+        warning("cast has space before last bracket '... )'", index_kw_start, index_kw_end)
+    # detect no space before operator: '(float*)'
+
+    for i in range(index_kw_start + 1, index_kw_end):
+        if tokens[i].text == "*":
+            # allow: '(*)'
+            if tokens[i - 1].type == Token.Punctuation:
+                pass
+            elif tokens[i - 1].text.isspace():
+                pass
+            else:
+                warning("cast has no preceeding whitespace '(type*)'", index_kw_start, index_kw_end)
+
+
+def blender_check_comma(index_kw):
+    i_next = tk_advance_ws_newline(index_kw, 1)
+
+    # check there is at least one space between:
+    # ,sometext
+    if index_kw + 1 == i_next:
+        warning("comma has no space after it ',sometext'", index_kw, i_next)
+
+    if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.isspace():
+        warning("comma space before it 'sometext ,", index_kw, i_next)
+
+
+def blender_check_period(index_kw):
+    # skip if part of a '...' sequence
+    if (tokens[index_kw - 1].text == ".") or (tokens[index_kw + 1].text == "."):
+        return
+
+    # 'a.b'
+    if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.isspace():
+        warning("period space before it 'sometext .", index_kw, index_kw)
+    if tokens[index_kw + 1].type == Token.Text and tokens[index_kw + 1].text.isspace():
+        warning("period space after it '. sometext", index_kw, index_kw)
+
+
+def _is_ws_pad(index_start, index_end):
+    return (tokens[index_start - 1].text.isspace() and
+            tokens[index_end + 1].text.isspace())
+
+
+def blender_check_operator(index_start, index_end, op_text, is_cpp):
+    if op_text == "->":
+        # allow compiler to handle
+        return
+
+    if len(op_text) == 1:
+        if op_text in {"+", "-"}:
+            # detect (-a) vs (a - b)
+            if     (not tokens[index_start - 1].text.isspace() and
+                    tokens[index_start - 1].text not in {"[", "(", "{"}):
+                warning("no space before operator '%s'" % op_text, index_start, index_end)
+            if     (not tokens[index_end + 1].text.isspace() and
+                    tokens[index_end + 1].text not in {"]", ")", "}"}):
+                # TODO, needs work to be useful
+                # warning("no space after operator '%s'" % op_text, index_start, index_end)
+                pass
+
+        elif op_text in {"/", "%", "^", "|", "=", "<", ">"}:
+            if not _is_ws_pad(index_start, index_end):
+                if not (is_cpp and ("<" in op_text or ">" in op_text)):
+                    warning("no space around operator '%s'" % op_text, index_start, index_end)
+        elif op_text == "&":
+            pass  # TODO, check if this is a pointer reference or not
+        elif op_text == "*":
+            # This check could be improved, it's a bit fuzzy
+            if     ((tokens[index_start - 1].type in Token.Number) or
+                    (tokens[index_start + 1].type in Token.Number)):
+                warning("no space around operator '%s'" % op_text, index_start, index_end)
+            elif not (tokens[index_start - 1].text.isspace() or tokens[index_start - 1].text in {"(", "[", "{"}):
+                warning("no space before operator '%s'" % op_text, index_start, index_end)
+    elif len(op_text) == 2:
+        # todo, remove operator check from `if`
+        if op_text in {"+=", "-=", "*=", "/=", "&=", "|=", "^=",
+                       "&&", "||",
+                       "==", "!=", "<=", ">=",
+                       "<<", ">>",
+                       "%=",
+                       # not operators, pointer mix-ins
+                       ">*", "<*", "-*", "+*", "=*", "/*", "%*", "^*", "|*",
+                       }:
+            if not _is_ws_pad(index_start, index_end):
+                if not (is_cpp and ("<" in op_text or ">" in op_text)):
+                    warning("no space around operator '%s'" % op_text, index_start, index_end)
+
+        elif op_text in {"++", "--"}:
+            pass  # TODO, figure out the side we are adding to!
+            '''
+            if     (tokens[index_start - 1].text.isspace() or
+                    tokens[index_end   + 1].text.isspace()):
+                warning("spaces surrounding operator '%s'" % op_text, index_start, index_end)
+            '''
+        elif op_text in {"!!", "!*"}:
+            # operators we _dont_ want whitespace after (pointers mainly)
+            # we can assume these are pointers
+            if tokens[index_end + 1].text.isspace():
+                warning("spaces after operator '%s'" % op_text, index_start, index_end)
+
+        elif op_text == "**":
+            pass  # handle below
+        elif op_text == "::":
+            pass  # C++, ignore for now
+        elif op_text == ":!*":
+            pass  # ignore for now
+        elif op_text == "*>":
+            pass  # ignore for now, C++ <Class *>
+        else:
+            warning("unhandled operator A '%s'" % op_text, index_start, index_end)
+    else:
+        #warning("unhandled operator B '%s'" % op_text, index_start, index_end)
+        pass
+
+    if len(op_text) > 1:
+        if op_text[0] == "*" and op_text[-1] == "*":
+            if     ((not tokens[index_start - 1].text.isspace()) and
+                    (not tokens[index_start - 1].type == Token.Punctuation)):
+                warning("no space before pointer operator '%s'" % op_text, index_start, index_end)
+            if tokens[index_end + 1].text.isspace():
+                warning("space before pointer operator '%s'" % op_text, index_start, index_end)
+
+    # check if we are first in the line
+    if op_text[0] == "!":
+        # if (a &&
+        #     !b)
+        pass
+    elif op_text[0] == "*" and tokens[index_start + 1].text.isspace() is False:
+        pass  # *a = b
+    elif len(op_text) == 1 and op_text[0] == "-" and tokens[index_start + 1].text.isspace() is False:
+        pass  # -1
+    elif len(op_text) == 2 and op_text == "++" and tokens[index_start + 1].text.isspace() is False:
+        pass  # ++a
+    elif len(op_text) == 2 and op_text == "--" and tokens[index_start + 1].text.isspace() is False:
+        pass  # --a
+    elif len(op_text) == 1 and op_text[0] == "&":
+        # if (a &&
+        #     &b)
+        pass
+    elif len(op_text) == 1 and op_text[0] == "~":
+        # C++
+        # ~ClassName
+        pass
+    elif len(op_text) == 1 and op_text[0] == "?":
+        # (a == b)
+        # ? c : d
+        pass
+    elif len(op_text) == 1 and op_text[0] == ":":
+        # a = b ? c
+        #      : d
+        pass
+    else:
+        if tk_index_is_linestart(index_start):
+            warning("operator starts a new line '%s'" % op_text, index_start, index_end)
+
+
+def blender_check_linelength(index_start, index_end, length):
+    if length > LIN_SIZE:
+        text = tk_range_to_str(index_start, index_end, expand_tabs=True)
+        for l in text.split("\n"):
+            if len(l) > LIN_SIZE:
+                warning("line length %d > %d" % (len(l), LIN_SIZE), index_start, index_end)
+
+
+def blender_check_function_definition(i):
+    # Warning, this is a fairly slow check and guesses
+    # based on some fuzzy rules
+
+    # assert(tokens[index].text == "{")
+
+    # check function declaration is not:
+    #  'void myfunc() {'
+    # ... other uses are handled by checks for statements
+    # this check is rather simplistic but tends to work well enough.
+
+    i_prev = i - 1
+    while tokens[i_prev].text == "":
+        i_prev -= 1
+
+    # ensure this isn't '{' on its own line
+    if tokens[i_prev].line == tokens[i].line:
+
+        # check '}' isn't on the same line...
+        i_next = i + 1
+        found = False
+        while tokens[i_next].line == tokens[i].line:
+            if tokens[i_next].text == "}":
+                found = True
+                break
+            i_next += 1
+        del i_next
+
+        if found is False:
+
+            # First check this isn't an assignment
+            i_prev = tk_advance_no_ws(i, -1)
+            # avoid '= {'
+            #if tokens(index_prev).text != "="
+            # print(tokens[i_prev].text)
+            # allow:
+            # - 'func()[] {'
+            # - 'func() {'
+
+            if tokens[i_prev].text in {")", "]"}:
+                i_prev = i - 1
+                while tokens[i_prev].line == tokens[i].line:
+                    i_prev -= 1
+                split = tokens[i_prev].text.rsplit("\n", 1)
+                if len(split) > 1 and split[-1] != "":
+                    split_line = split[-1]
+                else:
+                    split_line = tokens[i_prev + 1].text
+
+                if split_line and split_line[0].isspace():
+                    pass
+                else:
+                    # no whitespace!
+                    i_begin = i_prev + 1
+
+                    # skip blank
+                    if tokens[i_begin].text == "":
+                        i_begin += 1
+                    # skip static
+                    if tokens[i_begin].text == "static":
+                        i_begin += 1
+                    while tokens[i_begin].text.isspace():
+                        i_begin += 1
+                    # now we are done skipping stuff
+
+                    warning("function's '{' must be on a newline", i_begin, i)
+
+
+def blender_check_brace_indent(i):
+    # assert(tokens[index].text == "{")
+
+    i_match = tk_match_bracket(i)
+
+    if tokens[i].line != tokens[i_match].line:
+        ws_i_match = extract_to_linestart(i_match)
+
+        # allow for...
+        # a[] = {1, 2,
+        #        3, 4}
+        # ... so only check braces which are the first text
+        if ws_i_match.isspace():
+            ws_i = extract_to_linestart(i)
+            ws_i_match_lstrip = ws_i_match.lstrip()
+
+            ws_i = ws_i[:len(ws_i) - len(ws_i.lstrip())]
+            ws_i_match = ws_i_match[:len(ws_i_match) - len(ws_i_match_lstrip)]
+            if ws_i != ws_i_match:
+                warning("indentation '{' does not match brace", i, i_match)
+
+
+def quick_check_indentation(lines):
+    """
+    Quick check for multiple tab indents.
+    """
+    t_prev = -1
+    m_comment_prev = False
+    ls_prev = ""
+
+    for i, l in enumerate(lines):
+        skip = False
+
+        # skip blank lines
+        ls = l.strip()
+
+        # comment or pre-processor
+        if ls:
+            # #ifdef ... or ... // comment
+            if (ls[0] == "#" or ls[0:2] == "//"):
+                skip = True
+            # label:
+            elif (':' in ls and l[0] != '\t'):
+                skip = True
+            # /* comment */
+            #~ elif ls.startswith("/*") and ls.endswith("*/"):
+            #~     skip = True
+            # /* some comment...
+            elif ls.startswith("/*"):
+                skip = True
+            # line ending a comment: */
+            elif ls == "*/":
+                skip = True
+            # * middle of multi line comment block
+            elif ls.startswith("* "):
+                skip = True
+            # exclude multi-line defines
+            elif ls.endswith("\\") or ls.endswith("(void)0") or ls_prev.endswith("\\"):
+                skip = True
+
+        ls_prev = ls
+
+        if skip:
+            continue
+
+        if ls:
+            ls = l.lstrip("\t")
+            tabs = l[:len(l) - len(ls)]
+            t = len(tabs)
+            if (t > t_prev + 1) and (t_prev != -1):
+                warning_lineonly("indentation mis-match (indent of %d) '%s'" % (t - t_prev, tabs), i + 1)
+            t_prev = t
+
+import re
+re_ifndef = re.compile(r"^\s*#\s*ifndef\s+([A-Za-z0-9_]+).*$")
+re_define = re.compile(r"^\s*#\s*define\s+([A-Za-z0-9_]+).*$")
+
+def quick_check_include_guard(lines):
+    found = 0
+    def_value = ""
+    ok = False
+
+    def fn_as_guard(fn):
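+        # e.g. "BKE_mesh.h" -> "__BKE_MESH_H__"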
+        name = os.path.basename(fn).upper().replace(".", "_").replace("-", "_")
+        return "__%s__" % name
+
+    for i, l in enumerate(lines):
+        ndef_match = re_ifndef.match(l)
+        if ndef_match:
+            ndef_value = ndef_match.group(1).strip()
+            for j in range(i + 1, len(lines)):
+                l_next = lines[j]
+                def_match = re_define.match(l_next)
+                if def_match:
+                    def_value = def_match.group(1).strip()
+                    if def_value == ndef_value:
+                        ok = True
+                        break
+                elif l_next.strip():
+                    # print(filepath)
+                    # found non empty non ndef line. quit
+                    break
+                else:
+                    # allow blank lines
+                    pass
+            break
+
+    guard = fn_as_guard(filepath)
+
+    if ok:
+        # print("found:", def_value, "->", filepath)
+        if def_value != guard:
+            # print("%s: %s -> %s" % (filepath, def_value, guard))
+            warning_lineonly("non-conforming include guard (found %r, expected %r)" % (def_value, guard), i + 1)
+    else:
+        warning_lineonly("missing include guard %r" % guard, 1)
+
+def quick_check_source(fp, code, args):
+
+    global filepath
+
+    is_header = fp.endswith((".h", ".hxx", ".hpp"))
+
+    filepath = fp
+
+    lines = code.split("\n")
+
+    if is_header:
+        quick_check_include_guard(lines)
+
+    quick_check_indentation(lines)
+
+def scan_source(fp, code, args):
+    # print("scanning: %r" % fp)
+
+    global filepath
+
+    is_cpp = fp.endswith((".cpp", ".cxx"))
+
+    filepath = fp
+
+    #if "displist.c" not in filepath:
+    #    return
+
+    filepath_base = os.path.basename(filepath)
+
+    #print(highlight(code, CLexer(), RawTokenFormatter()).decode('utf-8'))
+
+    del tokens[:]
+    line = 1
+
+    for ttype, text in lex(code, CLexer()):
+        if text:
+            tokens.append(TokStore(ttype, text, line))
+            line += text.count("\n")
+
+    col = 0  # track line length
+    index_line_start = 0
+
+    for i, tok in enumerate(tokens):
+        #print(tok.type, tok.text)
+        if tok.type == Token.Keyword:
+            if tok.text in {"switch", "while", "if", "for"}:
+                item_range = extract_statement_if(i)
+                if item_range is not None:
+                    blender_check_kw_if(item_range[0], i, item_range[1])
+                    if tok.text == "switch":
+                        blender_check_kw_switch(item_range[0], i, item_range[1])
+            elif tok.text == "else":
+                blender_check_kw_else(i)
+            elif tok.text == "sizeof":
+                blender_check_kw_sizeof(i)
+        elif tok.type == Token.Punctuation:
+            if tok.text == ",":
+                blender_check_comma(i)
+            elif tok.text == ".":
+                blender_check_period(i)
+            elif tok.text == "[":
+                # note, we're quite relaxed about this but
+                # disallow 'foo ['
+                if tokens[i - 1].text.isspace():
+                    if is_cpp and tokens[i + 1].text == "]":
+                        # c++ can do delete []
+                        pass
+                    else:
+                        warning("space before '['", i, i)
+            elif tok.text == "(":
+                # check if this is a cast, eg:
+                #  (char), (char **), (float (*)[3])
+                item_range = extract_cast(i)
+                if item_range is not None:
+                    blender_check_cast(item_range[0], item_range[1])
+            elif tok.text == "{":
+                # check matching brace is indented correctly (slow!)
+                blender_check_brace_indent(i)
+
+                # check previous character is either a '{' or whitespace.
+                if (tokens[i - 1].line == tok.line) and not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{"):
+                    warning("no space before '{'", i, i)
+
+                blender_check_function_definition(i)
+
+        elif tok.type == Token.Operator:
+            # we check these in pairs, only want first
+            if tokens[i - 1].type != Token.Operator:
+                op, index_kw_end = extract_operator(i)
+                blender_check_operator(i, index_kw_end, op, is_cpp)
+        elif tok.type in Token.Comment:
+            doxyfn = None
+            if "\\file" in tok.text:
+                doxyfn = tok.text.split("\\file", 1)[1].strip().split()[0]
+            elif "@file" in tok.text:
+                doxyfn = tok.text.split("@file", 1)[1].strip().split()[0]
+
+            if doxyfn is not None:
+                doxyfn_base = os.path.basename(doxyfn)
+                if doxyfn_base != filepath_base:
+                    warning("doxygen filename mismatch %s != %s" % (doxyfn_base, filepath_base), i, i)
+
+        # ensure line length
+        if (not args.no_length_check) and tok.type == Token.Text and tok.text == "\n":
+            # check line len
+            blender_check_linelength(index_line_start, i - 1, col)
+
+            col = 0
+            index_line_start = i + 1
+        else:
+            col += len(tok.text.expandtabs(TAB_SIZE))
+
+        #elif tok.type == Token.Name:
+        #    print(tok.text)
+
+        #print(ttype, type(ttype))
+        #print((ttype, value))
+
+    #for ttype, value in la:
+    #    #print(value, end="")
+
+
+def scan_source_filepath(filepath, args):
+    # for quick tests
+    #~ if not filepath.endswith("creator.c"):
+    #~     return
+
+    code = open(filepath, 'r', encoding="utf-8").read()
+
+    # fast checks which don't require full parsing
+    quick_check_source(filepath, code, args)
+
+    # use lexer
+    scan_source(filepath, code, args)
+
+
+def scan_source_recursive(dirpath, args):
+    import os
+    from os.path import join, splitext
+
+    def source_list(path, filename_check=None):
+        for dirpath, dirnames, filenames in os.walk(path):
+
+            # skip '.svn'
+            if dirpath.startswith("."):
+                continue
+
+            for filename in filenames:
+                filepath = join(dirpath, filename)
+                if filename_check is None or filename_check(filepath):
+                    yield filepath
+
+    def is_source(filename):
+        ext = splitext(filename)[1]
+        return (ext in {".c", ".inl", ".cpp", ".cxx", ".hpp", ".hxx", ".h", ".osl"})
+
+    for filepath in sorted(source_list(dirpath, is_source)):
+        if is_ignore(filepath):
+            continue
+
+        scan_source_filepath(filepath, args)
+
+
+if __name__ == "__main__":
+    import sys
+    import os
+
+    desc = "Check C/C++ code for conformance with Blender's style guide:\n(http://wiki.blender.org/index.php/Dev:Doc/CodeStyle)"
+    parser = argparse.ArgumentParser(description=desc)
+    parser.add_argument("paths", nargs='+', help="list of files or directories to check")
+    parser.add_argument("-l", "--no-length-check", action="store_true",
+                        help="skip warnings for long lines")
+    args = parser.parse_args()
+
+    if 0:
+        SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))))
+        #scan_source_recursive(os.path.join(SOURCE_DIR, "source", "blender", "bmesh"))
+        scan_source_recursive(os.path.join(SOURCE_DIR, "source/blender/makesrna/intern"), args)
+        sys.exit(0)
+
+    for filepath in args.paths:
+        if os.path.isdir(filepath):
+            # recursive search
+            scan_source_recursive(filepath, args)
+        else:
+            # single file
+            scan_source_filepath(filepath, args)
diff --git a/check_source/check_style_c_config.py b/check_source/check_style_c_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..762c41b600e163a8c662bd44abcdec080fd1c6f1
--- /dev/null
+++ b/check_source/check_style_c_config.py
@@ -0,0 +1,48 @@
+import os
+
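+# files (relative to SOURCE_DIR) excluded from the style checks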
+IGNORE = (
+
+    # particles
+    "source/blender/blenkernel/intern/boids.c",
+    "source/blender/blenkernel/intern/cloth.c",
+    "source/blender/blenkernel/intern/collision.c",
+    "source/blender/blenkernel/intern/effect.c",
+    "source/blender/blenkernel/intern/implicit.c",
+    "source/blender/blenkernel/intern/particle.c",
+    "source/blender/blenkernel/intern/particle_system.c",
+    "source/blender/blenkernel/intern/pointcache.c",
+    "source/blender/blenkernel/intern/sca.c",
+    "source/blender/blenkernel/intern/softbody.c",
+    "source/blender/blenkernel/intern/smoke.c",
+
+    "source/blender/blenlib/intern/fnmatch.c",
+    "source/blender/blenlib/intern/md5.c",
+    "source/blender/blenlib/intern/voxel.c",
+
+    "source/blender/blenloader/intern/readfile.c",
+    "source/blender/blenloader/intern/versioning_250.c",
+    "source/blender/blenloader/intern/versioning_legacy.c",
+    "source/blender/blenloader/intern/writefile.c",
+
+    "source/blender/editors/space_logic/logic_buttons.c",
+    "source/blender/editors/space_logic/logic_window.c",
+
+    "source/blender/imbuf/intern/dds/DirectDrawSurface.cpp",
+
+    "source/blender/opencl/intern/clew.c",
+    "source/blender/opencl/intern/clew.h",
+    )
+
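+# directories (relative to SOURCE_DIR) excluded entirely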
+IGNORE_DIR = (
+    "source/blender/collada",
+    "source/blender/render",
+    "source/blender/editors/physics",
+    "source/blender/editors/space_logic",
+    "source/blender/freestyle",
+    "source/blender/gpu",
+    )
+
+
+SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))