Newer
Older
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
# label:
elif (':' in ls and l[0] != '\t'):
skip = True
# /* comment */
#~ elif ls.startswith("/*") and ls.endswith("*/"):
#~ skip = True
# /* some comment...
elif ls.startswith("/*"):
skip = True
# line ending a comment: */
elif ls == "*/":
skip = True
# * middle of multi line comment block
elif ls.startswith("* "):
skip = True
# exclude muli-line defines
elif ls.endswith("\\") or ls.endswith("(void)0") or ls_prev.endswith("\\"):
skip = True
ls_prev = ls
if skip:
continue
if ls:
ls = l.lstrip("\t")
tabs = l[:len(l) - len(ls)]
t = len(tabs)
if (t > t_prev + 1) and (t_prev != -1):
warning_lineonly("E146", "indentation mis-match (indent of %d) '%s'" %
(t - t_prev, tabs), i + 1)
t_prev = t
import re

# Regexes that capture the macro name from "#ifndef NAME" / "#define NAME"
# preprocessor lines (leading whitespace and trailing text are tolerated).
# fix: use raw strings (avoids invalid-escape warnings for '\s') and
# [A-Za-z0-9_] instead of [A-z0-9_] -- the latter is a classic typo that
# also matches '[', '\', ']', '^' and '`'.
re_ifndef = re.compile(r"^\s*#\s*ifndef\s+([A-Za-z0-9_]+).*$")
re_define = re.compile(r"^\s*#\s*define\s+([A-Za-z0-9_]+).*$")
def quick_check_include_guard(lines):
    """Check that a header file has a conforming include guard.

    Looks for the first ``#ifndef NAME`` line, verifies that it is followed
    (allowing blank lines) by a matching ``#define NAME``, and that the guard
    macro matches the ``__FILENAME_EXT__`` form derived from the global
    ``filepath``.  Emits E147 for a non-conforming guard name and E148 when
    no guard is found at all.

    :param lines: the file content split into lines.
    """
    def_value = ""
    ok = False

    def fn_as_guard(fn):
        # derive the expected guard macro from the file name,
        # e.g. "foo-bar.h" -> "__FOO_BAR_H__"
        name = os.path.basename(fn).upper().replace(".", "_").replace("-", "_")
        return "__%s__" % name

    for i, l in enumerate(lines):
        ndef_match = re_ifndef.match(l)
        if ndef_match:
            ndef_value = ndef_match.group(1).strip()
            for j in range(i + 1, len(lines)):
                l_next = lines[j]
                def_match = re_define.match(l_next)
                if def_match:
                    def_value = def_match.group(1).strip()
                    if def_value == ndef_value:
                        ok = True
                        break
                elif l_next.strip():
                    # print(filepath)
                    # found non empty non ndef line. quit
                    break
                else:
                    # allow blank lines
                    pass
            # only the first #ifndef is considered
            break

    guard = fn_as_guard(filepath)

    if ok:
        # print("found:", def_value, "->", filepath)
        if def_value != guard:
            # print("%s: %s -> %s" % (filepath, def_value, guard))
            warning_lineonly("E147", "non-conforming include guard (found %r, expected %r)" %
                             (def_value, guard), i + 1)
    else:
        # fix: E148 must only fire when no guard was found; previously it was
        # emitted unconditionally, flagging every header as missing a guard.
        warning_lineonly("E148", "missing include guard %r" % guard, 1)
def quick_check_source(fp, code, args):
    """Run the fast, line-based checks (no lexing required).

    :param fp: path of the file being checked (stored in the global
        ``filepath`` for the warning helpers).
    :param code: full text of the file.
    :param args: parsed command line arguments (unused here, kept for
        signature symmetry with ``scan_source``).
    """
    global filepath

    filepath = fp
    lines = code.split("\n")

    # include guards only make sense for headers
    if fp.endswith((".h", ".hxx", ".hpp")):
        quick_check_include_guard(lines)

    quick_check_indentation(lines)
def scan_source(fp, code, args):
    """Lex *code* with pygments' CLexer and run the token-based style checks.

    Populates the module-level ``tokens`` list (as ``TokStore`` items with
    line numbers), then walks the tokens dispatching the various
    ``blender_check_*`` helpers; also validates doxygen ``\\file`` comments
    and line length.

    :param fp: path of the file being checked (stored in global ``filepath``).
    :param code: full text of the file.
    :param args: parsed command line arguments (``no_length_check`` is read).
    """
    # print("scanning: %r" % fp)
    global filepath

    is_cpp = fp.endswith((".cpp", ".cxx"))

    filepath = fp

    # if "displist.c" not in filepath:
    #     return

    filepath_base = os.path.basename(filepath)
    filepath_split = filepath.split(os.sep)

    # print(highlight(code, CLexer(), RawTokenFormatter()).decode('utf-8'))

    # re-lex the file into the shared token list
    del tokens[:]
    line = 1
    for ttype, text in lex(code, CLexer()):
        if text:
            tokens.append(TokStore(ttype, text, line))
            line += text.count("\n")

    col = 0  # track line length
    index_line_start = 0

    for i, tok in enumerate(tokens):
        if tok.type == Token.Keyword:
            if tok.text in {"switch", "while", "if", "for"}:
                item_range = extract_statement_if(i)
                if item_range is not None:
                    blender_check_kw_if(item_range[0], i, item_range[1])
                    if tok.text == "switch":
                        blender_check_kw_switch(item_range[0], i, item_range[1])
            elif tok.text == "else":
                blender_check_kw_else(i)
            elif tok.text == "sizeof":
                blender_check_kw_sizeof(i)
        elif tok.type == Token.Punctuation:
            if tok.text == ",":
                blender_check_comma(i)
            elif tok.text == ".":
                blender_check_period(i)
            elif tok.text == "[":
                # note, we're quite relaxed about this but
                # disallow 'foo ['
                if tokens[i - 1].text.isspace():
                    if is_cpp and tokens[i + 1].text == "]":
                        # c++ can do delete []
                        pass
                    else:
                        warning("E149", "space before '['", i, i)
            elif tok.text == "(":
                # check if this is a cast, eg:
                #  (char), (char **), (float (*)[3])
                item_range = extract_cast(i)
                if item_range is not None:
                    blender_check_cast(item_range[0], item_range[1])
            elif tok.text == "{":
                # check matching brace is indented correctly (slow!)
                blender_check_brace_indent(i)

                # check previous character is either a '{' or whitespace.
                if (tokens[i - 1].line == tok.line) and not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{"):
                    warning("E150", "no space before '{'", i, i)

                blender_check_function_definition(i)
        elif tok.type == Token.Operator:
            # we check these in pairs, only want first
            if tokens[i - 1].type != Token.Operator:
                op, index_kw_end = extract_operator(i)
                blender_check_operator(i, index_kw_end, op, is_cpp)
        elif tok.type in Token.Comment:
            doxyfn = None
            if "\\file" in tok.text:
                doxyfn = tok.text.split("\\file", 1)[1].strip().split()[0]
            elif "@file" in tok.text:
                doxyfn = tok.text.split("@file", 1)[1].strip().split()[0]

            if doxyfn is not None:
                doxyfn_base = os.path.basename(doxyfn)
                if doxyfn_base != filepath_base:
                    warning("E151", "doxygen filename mismatch %s != %s" % (doxyfn_base, filepath_base), i, i)
                doxyfn_split = doxyfn.split("/")
                if len(doxyfn_split) > 1:
                    fp_split = filepath_split[-len(doxyfn_split):]
                    if doxyfn_split != fp_split:
                        warning("E151", "doxygen filepath mismatch %s != %s" % (doxyfn, "/".join(fp_split)), i, i)
                    del fp_split
                del doxyfn_base, doxyfn_split
            del doxyfn

        # ensure line length
        if (not args.no_length_check) and tok.type == Token.Text and tok.text == "\n":
            # check line len
            blender_check_linelength(index_line_start, i - 1, col)

            col = 0
            index_line_start = i + 1
        else:
            col += len(tok.text.expandtabs(TAB_SIZE))

        # elif tok.type == Token.Name:
        #     print(tok.text)

        # print(ttype, type(ttype))
        # print((ttype, value))

    # for ttype, value in la:
    #     # print(value, end="")
def scan_source_filepath(filepath, args):
    """Run all style checks on a single source file.

    :param filepath: path of the file to check.
    :param args: parsed command line arguments.
    """
    # for quick tests
    #~ if not filepath.endswith("creator.c"):
    #~     return

    # fix: close the file handle deterministically
    # (was ``open(...).read()``, leaking the handle)
    with open(filepath, 'r', encoding="utf-8") as fh:
        code = fh.read()

    # fast checks which don't require full parsing
    quick_check_source(filepath, code, args)

    # use lexer
    scan_source(filepath, code, args)
def scan_source_recursive(dirpath, args):
    """Scan every C/C++ (and OSL) source file under *dirpath*, recursively.

    :param dirpath: root directory to walk.
    :param args: parsed command line arguments, passed through to
        ``scan_source_filepath``.
    """
    import os
    from os.path import join, splitext

    def source_list(path, filename_check=None):
        # yield each file under *path* that passes *filename_check*
        for dirpath, dirnames, filenames in os.walk(path):
            # skip hidden directories such as '.svn'
            # fix: the old test ``dirpath.startswith(".")`` checked the full
            # walk path (which rarely starts with '.'), so hidden directories
            # were never actually skipped; prune them from the walk instead.
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]
            for filename in filenames:
                filepath = join(dirpath, filename)
                if filename_check is None or filename_check(filepath):
                    yield filepath

    def is_source(filename):
        # recognized source/header extensions
        ext = splitext(filename)[1]
        return (ext in {".c", ".inl", ".cpp", ".cxx", ".cc", ".hpp", ".hxx", ".h", ".hh", ".osl"})

    for filepath in sorted(source_list(dirpath, is_source)):
        if is_ignore(filepath):
            continue

        scan_source_filepath(filepath, args)
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
def create_parser():
    """Build and return the command line argument parser.

    Accepts one or more paths plus an optional ``-l``/``--no-length-check``
    flag to suppress long-line warnings.
    """
    p = argparse.ArgumentParser(
        description=(
            "Check C/C++ code for conformance with blenders style guide:\n"
            "http://wiki.blender.org/index.php/Dev:Doc/CodeStyle)"))

    p.add_argument("paths",
                   nargs='+',
                   help="list of files or directories to check")
    p.add_argument("-l", "--no-length-check",
                   action="store_true",
                   help="skip warnings for long lines")

    return p
def main(argv=None):
    """Command line entry point.

    :param argv: argument list; defaults to ``sys.argv[1:]``.
    """
    import sys
    import os

    if argv is None:
        argv = sys.argv[1:]

    parser = create_parser()
    args = parser.parse_args(argv)
    del argv

    print("Scanning:", SOURCE_DIR)

    # fix: leftover debug code unconditionally scanned a hard-coded directory
    # and called sys.exit(0), making the argument loop below unreachable;
    # keep it here but disabled.
    if 0:
        # SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))))
        # scan_source_recursive(os.path.join(SOURCE_DIR, "source", "blender", "bmesh"))
        scan_source_recursive(os.path.join(SOURCE_DIR, "source/blender/makesrna/intern"), args)
        sys.exit(0)

    for filepath in args.paths:
        if os.path.isdir(filepath):
            # recursive search
            scan_source_recursive(filepath, args)
        else:
            # single file
            scan_source_filepath(filepath, args)


if __name__ == "__main__":
    main()