Newer
Older
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
# print(tokens[i_prev].text)
# allow:
# - 'func()[] {'
# - 'func() {'
if tokens[i_prev].text in {")", "]"}:
i_prev = i - 1
while tokens[i_prev].line == tokens[i].line:
i_prev -= 1
split = tokens[i_prev].text.rsplit("\n", 1)
if len(split) > 1 and split[-1] != "":
split_line = split[-1]
else:
split_line = tokens[i_prev + 1].text
if split_line and split_line[0].isspace():
pass
else:
# no whitespace!
i_begin = i_prev + 1
# skip blank
if tokens[i_begin].text == "":
i_begin += 1
# skip static
if tokens[i_begin].text == "static":
i_begin += 1
while tokens[i_begin].text.isspace():
i_begin += 1
# now we are done skipping stuff
warning(fn, "E101", "function's '{' must be on a newline", i_begin, i)
def blender_check_brace_indent(fn, i):
    """
    Check that a brace pair spanning multiple lines is indented
    consistently, reporting E104 when the leading whitespace of the
    opening '{' line differs from that of the matching brace's line.

    :arg fn: report callback forwarded to ``warning``.
    :arg i: index into the module-global ``tokens`` list; the token at
        ``i`` is expected to be an opening "{" (see assert below).
    """
    # assert(tokens[index].text == "{")
    # index of the matching bracket token (same line => nothing to check)
    i_match = tk_match_backet(i)
    if tokens[i].line != tokens[i_match].line:
        ws_i_match = extract_to_linestart(i_match)
        # allow for...
        # a[] = {1, 2,
        #        3, 4}
        # ... so only check braces which are the first text
        if ws_i_match.isspace():
            ws_i = extract_to_linestart(i)
            ws_i_match_lstrip = ws_i_match.lstrip()
            # reduce both strings to their leading whitespace only,
            # then compare byte-for-byte (tabs vs spaces matter here)
            ws_i = ws_i[:len(ws_i) - len(ws_i.lstrip())]
            ws_i_match = ws_i_match[:len(ws_i_match) - len(ws_i_match_lstrip)]
            if ws_i != ws_i_match:
                warning(fn, "E104", "indentation '{' does not match brace", i, i_match)
def quick_check_includes(fn, lines):
    """
    Check ``#include "..."`` lines:

    - E171: the same header included more than once.
    - E170: overly relative include paths (currently disabled, see below).

    :arg fn: report callback forwarded to ``warning_lineonly``.
    :arg lines: the file's contents split on newlines.
    """
    # Find overly relative headers (could check other things here too...)

    # header dupes
    inc = set()
    # Bind the pre-compiled matcher once (attribute is assigned just
    # below the function).  BUGFIX: this binding was missing, making
    # ``match(l)`` an unbound name.
    match = quick_check_includes.re_inc_match
    for i, l in enumerate(lines):
        m = match(l)
        if m is not None:
            l_header = m.group(1)

            # check if the include is overly relative
            # NOTE: deliberately disabled via ``and 0`` -- kept for reference.
            if l_header.startswith("../") and 0:
                # only needed by this (disabled) branch
                base = os.path.dirname(filepath)
                l_header_full = os.path.join(base, l_header)
                l_header_full = os.path.normpath(l_header_full)
                if os.path.exists(l_header_full):
                    l_header_rel = os.path.relpath(l_header_full, base)
                    if l_header.count("/") != l_header_rel.count("/"):
                        warning_lineonly(fn, "E170", "overly relative include %r" % l_header, i + 1)

            # check if the header is included more than once
            len_inc = len(inc)
            inc.add(l_header)
            if len(inc) == len_inc:
                warning_lineonly(fn, "E171", "duplicate includes %r" % l_header, i + 1)


quick_check_includes.re_inc_match = re.compile(r"\s*#\s*include\s+\"([a-zA-Z0-9_\-\.\/]+)\"").match
def quick_check_indentation(fn, lines):
"""
Quick check for multiple tab indents.
"""
t_prev = -1
ls_prev = ""
ws_prev = ""
ws_prev_expand = ""
for i, l in enumerate(lines):
skip = False
# skip blank lines
ls = l.strip()
# comment or pre-processor
if ls:
# #ifdef ... or ... // comment
if ls[0] == "#":
# check preprocessor indentation here
# basic rules, NEVER INDENT
# just need to check multi-line macros.
if l[0] != "#":
# we have indent, check previous line
if not ls_prev.rstrip().endswith("\\"):
# report indented line
warning_lineonly(
fn, "E145", "indentation found with preprocessor "
"(expected none or after '#')", i + 1)
# label:
elif (':' in ls and l[0] != '\t'):
skip = True
# /* comment */
# ~ elif ls.startswith("/*") and ls.endswith("*/"):
# ~ skip = True
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
# /* some comment...
elif ls.startswith("/*"):
skip = True
# line ending a comment: */
elif ls == "*/":
skip = True
# * middle of multi line comment block
elif ls.startswith("* "):
skip = True
# exclude muli-line defines
elif ls.endswith("\\") or ls.endswith("(void)0") or ls_prev.endswith("\\"):
skip = True
ls_prev = ls
if skip:
continue
if ls:
ls = l.lstrip("\t")
tabs = l[:len(l) - len(ls)]
t = len(tabs)
if (t > t_prev + 1) and (t_prev != -1):
warning_lineonly(
fn, "E146", "indentation mismatch (indent of %d) '%s'" %
(t - t_prev, tabs), i + 1)
# check for same indentation with different space/tab mix
ws = l[:len(l) - len(l.lstrip())]
ws_expand = ws.expandtabs(4)
if ws_expand == ws_prev_expand:
if ws != ws_prev:
warning_lineonly(
fn, "E152", "indentation tab/space mismatch",
i + 1)
ws_prev = ws
ws_prev_expand = ws_expand
# Include-guard patterns used by quick_check_include_guard().
# BUGFIX: the original class ``[A-z0-9_]`` is a classic regex mistake --
# the A-z range also covers the ASCII characters ``[ \ ] ^ _ ` `` between
# 'Z' and 'a'.  Use an explicit ``[A-Za-z0-9_]`` so only valid macro-name
# characters are captured.
re_ifndef = re.compile(r"^\s*#\s*ifndef\s+([A-Za-z0-9_]+).*$")
re_define = re.compile(r"^\s*#\s*define\s+([A-Za-z0-9_]+).*$")
def quick_check_include_guard(fn, lines):
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
def_value = ""
ok = False
def fn_as_guard(fn):
name = os.path.basename(fn).upper().replace(".", "_").replace("-", "_")
return "__%s__" % name
for i, l in enumerate(lines):
ndef_match = re_ifndef.match(l)
if ndef_match:
ndef_value = ndef_match.group(1).strip()
for j in range(i + 1, len(lines)):
l_next = lines[j]
def_match = re_define.match(l_next)
if def_match:
def_value = def_match.group(1).strip()
if def_value == ndef_value:
ok = True
break
elif l_next.strip():
# print(filepath)
# found non empty non ndef line. quit
break
else:
# allow blank lines
pass
break
guard = fn_as_guard(filepath)
if ok:
# print("found:", def_value, "->", filepath)
if def_value != guard:
# print("%s: %s -> %s" % (filepath, def_value, guard))
warning_lineonly(
fn, "E147", "non-conforming include guard (found %r, expected %r)" %
(def_value, guard), i + 1)
warning_lineonly(
fn, "E148", "missing include guard %r" % guard, 1)
def quick_check_source(fp, code, args, fn):
    """
    Run the fast, line-based checks (no token parsing) on one file.

    Sets the module-global ``filepath`` used by the individual checks.
    """
    global filepath
    filepath = fp
    lines = code.split("\n")

    # include guards only make sense for header files
    if fp.endswith((".h", ".hxx", ".hpp")):
        quick_check_include_guard(fn, lines)

    quick_check_includes(fn, lines)
    quick_check_indentation(fn, lines)
def scan_source(fp, code, args, fn):
# print("scanning: %r" % fp)
global filepath
is_cpp = fp.endswith((".cpp", ".cxx"))
filepath = fp
# if "displist.c" not in filepath:
# return
filepath_base = os.path.basename(filepath)
filepath_split = filepath.split(os.sep)
# print(highlight(code, CLexer(), RawTokenFormatter()).decode('utf-8'))
del tokens[:]
line = 1
for ttype, text in lex(code, CLexer()):
if text:
tokens.append(TokStore(ttype, text, line))
line += text.count("\n")
col = 0 # track line length
index_line_start = 0
for i, tok in enumerate(tokens):
if tok.type == Token.Keyword:
if tok.text in {"switch", "while", "if", "for"}:
item_range = extract_statement_if(fn, i)
if item_range is not None:
blender_check_kw_if(fn, item_range[0], i, item_range[1])
if tok.text == "switch":
blender_check_kw_switch(fn, item_range[0], i, item_range[1])
elif tok.text == "else":
blender_check_kw_else(fn, i)
elif tok.text == "sizeof":
blender_check_kw_sizeof(fn, i)
elif tok.type == Token.Punctuation:
if tok.text == ",":
blender_check_comma(fn, i)
elif tok.text == ".":
blender_check_period(fn, i)
elif tok.text == "[":
# note, we're quite relaxed about this but
# disallow 'foo ['
if tokens[i - 1].text.isspace():
if is_cpp and tokens[i + 1].text == "]":
# c++ can do delete []
pass
else:
warning(fn, "E149", "space before '['", i, i)
elif tok.text == "(":
# check if this is a cast, eg:
# (char), (char **), (float (*)[3])
item_range = extract_cast(i)
if item_range is not None:
blender_check_cast(fn, item_range[0], item_range[1])
elif tok.text == "{":
# check matching brace is indented correctly (slow!)
blender_check_brace_indent(fn, i)
# check previous character is either a '{' or whitespace.
not (tokens[i - 1].text.isspace() or
tokens[i - 1].text == "{" or
tokens[i - 1].flag & IS_CAST)
):
warning(fn, "E150", "no space before '{'", i, i)
blender_check_function_definition(fn, i)
elif tok.type == Token.Operator:
# we check these in pairs, only want first
if tokens[i - 1].type != Token.Operator:
op, index_kw_end = extract_operator(i)
blender_check_operator(fn, i, index_kw_end, op, is_cpp)
elif tok.type in Token.Comment:
doxyfn = None
if "\\file" in tok.text:
doxyfn = tok.text.split("\\file", 1)[1].strip().split()[0]
elif "@file" in tok.text:
doxyfn = tok.text.split("@file", 1)[1].strip().split()[0]
if doxyfn is not None:
doxyfn_base = os.path.basename(doxyfn)
if doxyfn_base != filepath_base:
warning(fn, "E151", "doxygen filename mismatch %s != %s" % (doxyfn_base, filepath_base), i, i)
doxyfn_split = doxyfn.split("/")
if len(doxyfn_split) > 1:
fp_split = filepath_split[-len(doxyfn_split):]
if doxyfn_split != fp_split:
warning(fn, "E151", "doxygen filepath mismatch %s != %s" % (doxyfn, "/".join(fp_split)), i, i)
del fp_split
del doxyfn_base, doxyfn_split
del doxyfn
# ensure line length
if (not args.no_length_check) and tok.type == Token.Text and tok.text == "\n":
# check line len
blender_check_linelength(fn, index_line_start, i - 1, col)
col = 0
index_line_start = i + 1
else:
col += len(tok.text.expandtabs(TAB_SIZE))
# print(tok.text)
# print(ttype, type(ttype))
# print((ttype, value))
# #print(value, end="")
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
def scan_source_filepath__cached(filepath, args, fn, cache):
    """
    Warning-cache front end for a single file.

    On a cache hit (stored hash matches the file's current hash) the
    stored warnings are replayed through ``fn``, the entry is carried
    over to the destination cache and ``None`` is returned.

    On a miss, returns ``(cache_src, cache_dst, fn_wrap)`` where
    ``fn_wrap`` records every warning (for the next run) before
    forwarding it to the original ``fn``.
    """
    cache_src, cache_dst = cache
    file_hash = hash_of_file(filepath)

    entry = cache_src.get(filepath)
    if entry is not None and entry[0] == file_hash:
        # Cache hit: re-emit the stored warnings unchanged.
        for call_args in entry[1]:
            fn(*call_args)
        cache_dst[filepath] = entry
        return None

    # Cache miss: record warnings so they can be replayed next run.
    recorded = []

    def fn_wrap(*call_args):
        fn_wrap.lines.append(call_args)
        fn_wrap.fn(*call_args)

    fn_wrap.fn = fn
    fn_wrap.lines = recorded
    cache_dst[filepath] = (file_hash, recorded)
    return cache_src, cache_dst, fn_wrap
def scan_source_filepath(filepath, args, fn, cache=None):
    """
    Run all checks (quick line-based and full token-based) on one file,
    optionally going through the warning cache.

    :arg filepath: path of the file to scan.
    :arg args: parsed command line arguments.
    :arg fn: report callback.
    :arg cache: optional ``(cache_files_src, cache_files_dst)`` pair.
    """
    # ~ if not filepath.endswith("creator.c"):
    # ~     return

    # BUGFIX: use a context manager so the file handle is closed even if
    # decoding or a later check raises (was a bare open().read()).
    with open(filepath, 'r', encoding="utf-8") as fh:
        code = fh.read()

    if cache:
        cache_result = scan_source_filepath__cached(filepath, args, fn, cache)
        if cache_result is None:
            # No need to execute: cached warnings were already replayed.
            return
        else:
            cache_files_src, cache_files_dst, fn = cache_result
            del cache_result

    # fast checks which don't require full parsing
    quick_check_source(filepath, code, args, fn)

    # full token-based scan
    scan_source(filepath, code, args, fn)
def scan_source_recursive(dirpath, args, fn, cache=None):
    """
    Scan every C/C++ source file below ``dirpath`` (skipping hidden
    directories/files and paths matched by ``is_ignore``).
    """
    import os
    from os.path import join, splitext

    # extensions treated as C/C++ (or OSL) sources
    source_exts = {".c", ".inl", ".cpp", ".cxx", ".cc", ".hpp", ".hxx", ".h", ".hh", ".osl"}

    def is_source(filename):
        # skip temp files
        if filename.startswith("."):
            return False
        return splitext(filename)[1] in source_exts

    def source_list(path, filename_check=None):
        for walk_dir, dirnames, filenames in os.walk(path):
            # skip '.git' (and any other hidden directory)
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]
            for filename in filenames:
                # avoid scanning backup files
                if filename.startswith("."):
                    continue
                fpath = join(walk_dir, filename)
                if filename_check is None or filename_check(fpath):
                    yield fpath

    for fpath in sorted(source_list(dirpath, is_source)):
        if is_ignore(fpath):
            continue
        scan_source_filepath(fpath, args, fn, cache)
def create_parser():
    """
    Build the command line parser for the style checker.

    :returns: an ``argparse.ArgumentParser`` accepting one or more
        ``paths`` plus an optional ``-l`` / ``--no-length-check`` flag.
    """
    # BUGFIX: the two ``parser.add_argument(`` opener lines were missing
    # (the file would not even parse), and the help text was being passed
    # positionally as ``prog`` rather than as the description.
    parser = argparse.ArgumentParser(
        description=(
            "Check C/C++ code for conformance with blenders style guide:\n"
            "http://wiki.blender.org/index.php/Dev:Doc/CodeStyle)"))
    parser.add_argument(
        "paths",
        nargs='+',
        help="list of files or directories to check",
    )
    parser.add_argument(
        "-l",
        "--no-length-check",
        action="store_true",
        help="skip warnings for long lines",
    )
    return parser
def main(argv=None):
    """
    Command line entry point: parse arguments, scan every path given,
    and optionally persist a warning cache (``CHECK_STYLE_C_CACHE``
    environment variable names the cache file).
    """
    import sys
    import os

    # For cache
    import pickle

    if argv is None:
        argv = sys.argv[1:]

    parser = create_parser()
    args = parser.parse_args(argv)
    del argv

    # NOTE(review): this debug stanza was left active -- the bare
    # ``sys.exit(0)`` made the real scan below unreachable, and
    # ``SOURCE_DIR`` is only defined by the commented-out lines.
    # Keep it disabled; re-enable by hand for ad-hoc testing only.
    # ~ SOURCE_DIR = os.path.normpath(
    # ~     os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
    # ~ print("Scanning:", SOURCE_DIR)
    # ~ scan_source_recursive(os.path.join(SOURCE_DIR, "source", "blender", "bmesh"), args)
    # ~ scan_source_recursive(os.path.join(SOURCE_DIR, "source/blender/makesrna/intern"), args)
    # ~ sys.exit(0)

    cache_filename = os.environ.get("CHECK_STYLE_C_CACHE", "")
    if cache_filename:
        if os.path.exists(cache_filename):
            with open(cache_filename, 'rb') as fh:
                (hash_of_script_src, cache_files_src) = pickle.load(fh)
        else:
            # first run: nothing cached yet
            hash_of_script_src = b''
            cache_files_src = {}
        cache_files_dst = {}
        hash_of_script_dst = hash_of_file(__file__)
        # If we change the Python code, ignore old cache.
        if hash_of_script_src != hash_of_script_dst:
            cache_files_src = {}
        cache = (cache_files_src, cache_files_dst)
    else:
        cache = None

    for filepath in args.paths:
        if os.path.isdir(filepath):
            # recursive search
            scan_source_recursive(filepath, args, print, cache)
        else:
            # single file
            scan_source_filepath(filepath, args, print, cache)

    if cache_filename:
        with open(cache_filename, 'wb') as fh:
            pickle.dump((hash_of_script_dst, cache_files_dst), fh)
if __name__ == "__main__":
main()