Newer
Older
is_header = fp.endswith((".h", ".hxx", ".hpp"))
filepath = fp
lines = code.split("\n")
if is_header:
quick_check_include_guard(lines)
quick_check_indentation(lines)
def scan_source(fp, code, args):
    """
    Lex *code* with pygments' ``CLexer`` and run the token-based
    ``blender_check_*`` style checks over the resulting token stream.

    :arg fp: path of the file being checked
       (used for the doxygen ``\\file`` name check).
    :arg code: full source text of the file.
    :arg args: parsed command line options
       (only ``args.no_length_check`` is read here).

    Side effects: rebuilds the module-level ``tokens`` list in-place and
    sets the module-level ``filepath`` so the check helpers can report
    against the current file.
    """
    # print("scanning: %r" % fp)

    global filepath

    is_cpp = fp.endswith((".cpp", ".cxx"))

    filepath = fp

    # if "displist.c" not in filepath:
    #     return

    filepath_base = os.path.basename(filepath)

    # print(highlight(code, CLexer(), RawTokenFormatter()).decode('utf-8'))

    # re-fill the shared token list in-place (helpers hold a reference to it)
    del tokens[:]

    line = 1
    for ttype, text in lex(code, CLexer()):
        if text:
            tokens.append(TokStore(ttype, text, line))
            line += text.count("\n")

    col = 0  # track line length
    index_line_start = 0

    for i, tok in enumerate(tokens):
        if tok.type == Token.Keyword:
            if tok.text in {"switch", "while", "if", "for"}:
                item_range = extract_statement_if(i)
                if item_range is not None:
                    blender_check_kw_if(item_range[0], i, item_range[1])
                    # NOTE(review): the switch check is nested inside the
                    # 'item_range is not None' guard so we never subscript a
                    # missing range; the mangled original left the nesting
                    # ambiguous — confirm against upstream.
                    if tok.text == "switch":
                        blender_check_kw_switch(item_range[0], i, item_range[1])
            elif tok.text == "else":
                blender_check_kw_else(i)
            elif tok.text == "sizeof":
                blender_check_kw_sizeof(i)
        elif tok.type == Token.Punctuation:
            if tok.text == ",":
                blender_check_comma(i)
            elif tok.text == ".":
                blender_check_period(i)
            elif tok.text == "[":
                # note, we're quite relaxed about this but
                # disallow 'foo ['
                if tokens[i - 1].text.isspace():
                    if is_cpp and tokens[i + 1].text == "]":
                        # c++ can do delete []
                        pass
                    else:
                        warning("E149", "space before '['", i, i)
            elif tok.text == "(":
                # check if this is a cast, eg:
                #  (char), (char **), (float (*)[3])
                item_range = extract_cast(i)
                if item_range is not None:
                    blender_check_cast(item_range[0], item_range[1])
            elif tok.text == "{":
                # check matching brace is indented correctly (slow!)
                blender_check_brace_indent(i)

                # check previous character is either a '{' or whitespace.
                if (tokens[i - 1].line == tok.line) and not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{"):
                    warning("E150", "no space before '{'", i, i)

                blender_check_function_definition(i)
        elif tok.type == Token.Operator:
            # we check these in pairs, only want first
            if tokens[i - 1].type != Token.Operator:
                op, index_kw_end = extract_operator(i)
                blender_check_operator(i, index_kw_end, op, is_cpp)
        elif tok.type in Token.Comment:
            # check doxygen '\file'/'@file' names match the real filename
            doxyfn = None
            if "\\file" in tok.text:
                doxyfn = tok.text.split("\\file", 1)[1].strip().split()[0]
            elif "@file" in tok.text:
                doxyfn = tok.text.split("@file", 1)[1].strip().split()[0]

            if doxyfn is not None:
                doxyfn_base = os.path.basename(doxyfn)
                if doxyfn_base != filepath_base:
                    warning("E151", "doxygen filename mismatch %s != %s" % (doxyfn_base, filepath_base), i, i)

        # ensure line length
        if (not args.no_length_check) and tok.type == Token.Text and tok.text == "\n":
            # check line len
            blender_check_linelength(index_line_start, i - 1, col)

            col = 0
            index_line_start = i + 1
        else:
            col += len(tok.text.expandtabs(TAB_SIZE))

        #elif tok.type == Token.Name:
        #    print(tok.text)

        #print(ttype, type(ttype))
        #print((ttype, value))

    #for ttype, value in la:
    #    #print(value, end="")
def scan_source_filepath(filepath, args):
    """
    Run both the quick (plain text) checks and the full (lexer based)
    checks on a single source file.

    :arg filepath: path of the file to check.
    :arg args: parsed command line options, passed through to the checks.
    """
    # for quick tests
    #~ if not filepath.endswith("creator.c"):
    #~     return

    # use a context manager so the handle is closed deterministically
    # (the original relied on the garbage collector to close it)
    with open(filepath, 'r', encoding="utf-8") as fh:
        code = fh.read()

    # fast checks which don't require full parsing
    quick_check_source(filepath, code, args)

    # use lexer
    scan_source(filepath, code, args)
def scan_source_recursive(dirpath, args):
    """
    Recursively check every C/C++/OSL source file found under *dirpath*,
    skipping hidden directories (e.g. '.svn') and explicitly ignored paths.

    :arg dirpath: root directory to walk.
    :arg args: parsed command line options, passed through to the checks.
    """
    import os
    from os.path import join, splitext

    def source_list(path, filename_check=None):
        # yield every file under 'path' that passes 'filename_check'
        for dirpath, dirnames, filenames in os.walk(path):
            # skip '.svn' (and other hidden directories): prune them from the
            # walk in-place. The previous 'dirpath.startswith(".")' test only
            # matched when the walk *root* itself began with a dot — it never
            # skipped nested '.svn' directories and wrongly skipped
            # everything for roots like './src'.
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]

            for filename in filenames:
                filepath = join(dirpath, filename)
                if filename_check is None or filename_check(filepath):
                    yield filepath

    def is_source(filename):
        # accept C/C++/OSL sources and headers by extension
        ext = splitext(filename)[1]
        return (ext in {".c", ".inl", ".cpp", ".cxx", ".cc", ".hpp", ".hxx", ".h", ".hh", ".osl"})

    for filepath in sorted(source_list(dirpath, is_source)):
        if is_ignore(filepath):
            continue

        scan_source_filepath(filepath, args)
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
def create_parser():
    """Build and return the command line argument parser."""
    description = (
        "Check C/C++ code for conformance with blenders style guide:\n"
        "http://wiki.blender.org/index.php/Dev:Doc/CodeStyle)"
    )

    parser = argparse.ArgumentParser(description=description)

    # one or more files and/or directories to process
    parser.add_argument(
        "paths",
        nargs='+',
        help="list of files or directories to check",
    )
    # optional flag to silence the line-length warnings
    parser.add_argument(
        "-l",
        "--no-length-check",
        action="store_true",
        help="skip warnings for long lines",
    )

    return parser
def main(argv=None):
    """
    Command line entry point: check every path given on the command line.

    :arg argv: argument list to parse, defaults to ``sys.argv[1:]``.
    """
    import sys
    import os

    if argv is None:
        argv = sys.argv[1:]

    parser = create_parser()
    args = parser.parse_args(argv)
    del argv

    # NOTE(review): removed leftover debug code that printed/scanned a
    # hard-coded SOURCE_DIR subtree and then called sys.exit(0), which made
    # the loop below unreachable — the 'paths' arguments were never honoured.
    for filepath in args.paths:
        if os.path.isdir(filepath):
            # recursive search
            scan_source_recursive(filepath, args)
        else:
            # single file
            scan_source_filepath(filepath, args)
# run as a script (importing the module performs no work by itself)
if __name__ == "__main__":
    main()