# NOTE: "Newer" / "Older" diff-viewer labels (scrape artifacts) removed.
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributor(s): Campbell Barton
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
"""
This script runs outside of blender and scans source
python3 source/tools/check_source/check_style_c.py source/
from check_style_c_config import IGNORE, IGNORE_DIR, SOURCE_DIR, TAB_SIZE, LIN_SIZE
IGNORE = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE])
IGNORE_DIR = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE_DIR])
WARN_TEXT = False
def is_ignore(f):
    """Return True when file path *f* is configured to be skipped.

    Matches either an exact path in IGNORE or a directory prefix in
    IGNORE_DIR (both resolved against SOURCE_DIR at import time).
    """
    for ig in IGNORE:
        if f == ig:
            return True
    for ig in IGNORE_DIR:
        # prefix match: anything under an ignored directory is skipped
        if f.startswith(ig):
            return True
    return False
def hash_of_file(fp):
    """Return the SHA-512 digest (raw bytes) of the file at *fp*."""
    import hashlib
    hasher = hashlib.sha512()
    with open(fp, 'rb') as handle:
        hasher.update(handle.read())
    return hasher.digest()
# TODO
#
# Add checks for:
# - macro brace use
# - line length - in a not-too-annoying way
# (allow for long arrays in struct definitions, PyMethodDef for eg)
from pygments import lex # highlight
from pygments.lexers import CLexer
from pygments.token import Token
import argparse
# Emit warnings in Qt-Creator's task format when USE_QTC_TASK is set
# in the environment.
PRINT_QTC_TASKFORMAT = False
if "USE_QTC_TASK" in os.environ:
    PRINT_QTC_TASKFORMAT = True

# NOTE(review): 'global' at module scope is a no-op; filepath is presumably
# assigned elsewhere in the file — confirm against the full source.
global filepath

# Token stream (TokStore instances) for the file currently being scanned.
tokens = []

# could store index here too, then have prev/next methods
class TokStore:
    """One lexed token: its pygments type, text, line number and a flag bitfield."""
    __slots__ = (
        "type",
        "text",
        "line",
        "flag",
    )

    def __init__(self, type, text, line):
        self.type = type
        self.text = text
        self.line = line
        # Bitfield (IS_CAST, ...). Must start cleared: tk_advance_flag()
        # reads it and extract_cast() ORs bits into it.
        # (The scraped source dropped this init and the __slots__ ')'.)
        self.flag = 0
# flags stored in TokStore.flag (bitmask); set by extract_cast()
IS_CAST = (1 << 0)
# NOTE: a run of bare line-number gutter artifacts ("99" ... "158") from the
# scraped diff view was removed here; the surrounding code is contiguous.
def tk_range_to_str(a, b, expand_tabs=False):
    """Join the text of tokens[a..b] (inclusive) into one string.

    With *expand_tabs*, tabs are expanded using the configured TAB_SIZE so
    column counts match the on-screen width.
    """
    txt = "".join([tokens[i].text for i in range(a, b + 1)])
    if expand_tabs:
        txt = txt.expandtabs(TAB_SIZE)
    return txt
def tk_item_is_newline(tok):
    """True for a plain-text token that is just a newline (tabs/spaces ignored)."""
    if tok.type != Token.Text:
        return False
    return tok.text.strip("\t ") == "\n"
def tk_item_is_ws_newline(tok):
    """True for tokens that are empty, pure whitespace (newlines included), or comments."""
    if tok.text == "":
        return True
    if tok.type == Token.Text and tok.text.isspace():
        return True
    return tok.type in Token.Comment
def tk_item_is_ws(tok):
    """True for empty, comment, or whitespace tokens that do NOT reduce to a newline.

    The strip("\t ") != "\n" test rejects tokens that are exactly a newline
    surrounded only by tabs/spaces, preserving line boundaries.
    """
    return (tok.text == "") or \
        (tok.type == Token.Text and tok.text.strip("\t ") != "\n" and tok.text.isspace()) or \
        (tok.type in Token.Comment)
# also skips comments
def tk_advance_ws(index, direction):
    """Step *index* while the neighbouring token in *direction* (+1/-1) is
    same-line whitespace or a comment; returns the index adjacent to the
    first non-whitespace token.
    """
    while tk_item_is_ws(tokens[index + direction]) and index > 0:
        index += direction
    return index
def tk_advance_no_ws(index, direction):
    """Return the index of the first non-whitespace, non-comment token
    strictly beyond *index* in *direction* (+1/-1).
    """
    index += direction
    while tk_item_is_ws(tokens[index]) and index > 0:
        index += direction
    return index
def tk_advance_ws_newline(index, direction):
    """Skip whitespace (including newlines) and comments in *direction*.

    Note: unlike tk_advance_ws() this returns index + direction, i.e. the
    first non-whitespace token itself rather than the token before it.
    """
    while tk_item_is_ws_newline(tokens[index + direction]) and index > 0:
        index += direction
    return index + direction
def tk_advance_line_start(index):
    """Go to the first non-whitespace token of the line containing *index*."""
    # rewind to the first token whose predecessor is on a previous line
    while tokens[index].line == tokens[index - 1].line and index > 0:
        index -= 1
    return tk_advance_no_ws(index, 1)
def tk_advance_line(index, direction):
    """Advance to the outermost token of the current line in *direction*
    (stops just before the line number changes).
    """
    line = tokens[index].line
    while tokens[index + direction].line == line or tokens[index].text == "\n":
        index += direction
    return index
def tk_advance_to_token(index, direction, text, type_):
    """Advance to the next token matching *text* and *type_*.

    Searches without bound; if no match exists this raises IndexError —
    the trailing 'return None' is unreachable.
    """
    index += direction
    while True:
        if (tokens[index].text == text) and (tokens[index].type == type_):
            return index
        index += direction
    return None
def tk_advance_line_to_token(index, direction, text, type_):
    """Advance to a token matching *text*/*type_* on the same line,
    or return None when the line ends first.
    """
    assert(isinstance(text, str))
    line = tokens[index].line
    index += direction
    while tokens[index].line == line:
        if (tokens[index].text == text) and (tokens[index].type == type_):
            return index
        index += direction
    return None
def tk_advance_line_to_token_with_fn(index, direction, text, fn):
    """Advance to a token on the same line whose text equals *text* and for
    which predicate *fn* returns true; None when the line ends first.
    """
    assert(isinstance(text, str))
    line = tokens[index].line
    i = index + direction
    while tokens[i].line == line:
        tok = tokens[i]
        if tok.text == text and fn(tok):
            return i
        i += direction
    return None
def tk_advance_flag(index, direction, flag):
    """Skip over the run of tokens whose *flag* state (set/unset) matches
    the token at *index*; returns the last index inside that run.
    """
    state = (tokens[index].flag & flag)
    while ((tokens[index + direction].flag) & flag == state) and index > 0:
        index += direction
    return index
def tk_match_backet(index):
    """Return the index of the bracket matching the one at *index*.

    Works in both directions: an opening bracket scans forward, a closing
    bracket scans backward, tracking nesting depth.
    """
    backet_start = tokens[index].text
    assert(tokens[index].type == Token.Punctuation)
    assert(backet_start in "[]{}()")

    if tokens[index].text in "({[":
        direction = 1
        backet_end = {"(": ")", "[": "]", "{": "}"}[backet_start]
    else:
        direction = -1
        backet_end = {")": "(", "]": "[", "}": "{"}[backet_start]

    level = 1
    index_match = index + direction
    while True:
        item = tokens[index_match]
        # For checking odd braces:
        # print(filepath, tokens[index].line, item.line)
        if item.type == Token.Punctuation:
            if item.text == backet_start:
                level += 1
            elif item.text == backet_end:
                level -= 1
                if level == 0:
                    break
        index_match += direction
    return index_match
def tk_index_is_linestart(index):
    """True when token *index* is the first non-blank token on its line."""
    prev = tk_advance_ws_newline(index, -1)
    return tokens[prev].line < tokens[index].line
def extract_to_linestart(index):
    """Return the source text of the current line up to (not including) the
    token at *index*, built by walking backwards and reversing the pieces.
    """
    ls = []
    line = tokens[index].line
    index -= 1
    while index > 0 and tokens[index].line == line:
        ls.append(tokens[index].text)
        index -= 1
    if index != 0:
        # the token we stopped on spans a newline; keep only the part after
        # its last newline (i.e. the start of our line)
        ls.append(tokens[index].text.rsplit("\n", 1)[1])
    ls.reverse()
    return "".join(ls)
def extract_ws_indent(index):
    """Return the leading tab indentation of the line containing token *index*."""
    # could optimize this
    line_text = extract_to_linestart(index)
    stripped = line_text.lstrip("\t")
    return line_text[:len(line_text) - len(stripped)]
def extract_statement_if(fn, index_kw):
    """Locate the parenthesised condition after an if/while/switch keyword.

    Returns (i_start, i_end) token indices spanning from just before the
    keyword to the matching ')', or None for preprocessor lines or when no
    '(' follows (which also emits warning E105).
    """
    # assert(tokens[index_kw].text == "if")

    # ignore preprocessor
    i_linestart = tk_advance_line_start(index_kw)
    if tokens[i_linestart].text.startswith("#"):
        return None

    i_start = tk_advance_ws(index_kw - 1, direction=-1)

    # seek forward
    i_next = tk_advance_ws_newline(index_kw, direction=1)
    # print(tokens[i_next])

    if tokens[i_next].type != Token.Punctuation or tokens[i_next].text != "(":
        warning(fn, "E105", "no '(' after '%s'" % tokens[index_kw].text, i_start, i_next)
        return None

    i_end = tk_match_backet(i_next)

    return (i_start, i_end)
def extract_operator(index_op):
    """Collect the run of consecutive Operator tokens starting at *index_op*.

    Returns (op_text, index_of_last_operator_token).
    """
    op_text = ""
    i = 0
    while tokens[index_op + i].type == Token.Operator:
        op_text += tokens[index_op + i].text
        i += 1
    return op_text, index_op + (i - 1)
def extract_cast(index):
    """Heuristically decide whether the '(' at *index* begins a C cast.

    Returns (i_start, i_end) spanning the brackets and tags every token in
    that range with IS_CAST, or None when the bracketed text is not a cast.
    """
    # to detect a cast is quite involved... sigh
    # assert(tokens[index].text == "(")

    # TODO, comment within cast, but that's rare
    i_start = index
    i_end = tk_match_backet(index)

    # first check we are not '()'
    if i_start + 1 == i_end:
        return None

    # check we have punctuation before the cast
    i = i_start - 1
    while tokens[i].text.isspace():
        i -= 1
    # avoids 'foo(bar)test'
    # but not ' = (bar)test'
    if tokens[i].type == Token.Keyword:
        # allow 'return (bar)test'
        if tokens[i].text != "return":
            return None
    elif tokens[i].type == Token.Name:
        return None

    # validate types
    tokens_cast = [tokens[i] for i in range(i_start + 1, i_end)]
    for t in tokens_cast:
        if t.type == Token.Keyword:
            # allow: '(struct FooBar)'
            if t.text not in {"const", "struct"}:
                return None
        elif t.type == Token.Operator and t.text != "*":
            # prevent '(a + b)'
            # note, we could have '(float(*)[1+2])' but this is unlikely
            return None
        elif t.type == Token.Punctuation and t.text not in '()[]':
            # prevent '(a, b)'
            return None

    # collapse the cast body, dropping comments/whitespace/qualifiers
    tokens_cast_strip = []
    for t in tokens_cast:
        if t.type in Token.Comment:
            pass
        elif t.type == Token.Text and t.text.isspace():
            pass
        elif t.type == Token.Keyword and t.text in {"const", "struct"}:
            pass
        elif t.type == Token.Keyword.Type and tokens_cast_strip and tokens_cast_strip[-1].type == Token.Keyword.Type:
            # ignore chained types `signed char`, `unsigned int`... etc.
            tokens_cast_strip[-1] = t
        else:
            tokens_cast_strip.append(t)

    # check token order and types
    if not tokens_cast_strip:
        return None
    if tokens_cast_strip[0].type not in {Token.Name, Token.Type, Token.Keyword.Type}:
        return None
    t_prev = None
    for t in tokens_cast_strip[1:]:
        # prevent identifiers after the first: '(a b)'
        if t.type in {Token.Keyword.Type, Token.Name, Token.Text}:
            return None
        # prevent: '(a * 4)'
        # allow:   '(a (*)[4])'
        if t_prev is not None and t_prev.text == "*" and t.type != Token.Punctuation:
            return None
        t_prev = t
    del t_prev

    # debug only
    '''
    string = "".join(tokens[i].text for i in range(i_start, i_end + 1))
    #string = "".join(tokens[i].text for i in range(i_start + 1, i_end))
    #types = [tokens[i].type for i in range(i_start + 1, i_end)]
    types = [t.type for t in tokens_cast_strip]
    print("STRING:", string)
    print("TYPES: ", types)
    print()
    '''

    # Set the cast flags, so other checkers can use
    for i in range(i_start, i_end + 1):
        tokens[i].flag |= IS_CAST

    return (i_start, i_end)
def tk_range_find_by_type(index_start, index_end, type_, filter_tk=None):
    """Return the first index in [index_start, index_end] whose token type is
    *type_* (and passes *filter_tk* when given); -1 when nothing matches.

    Note: 'return -1' is at function level so an empty/inverted range also
    yields -1, matching the '!= -1' tests at the call sites.
    """
    if index_start < index_end:
        for i in range(index_start, index_end + 1):
            if tokens[i].type == type_:
                if filter_tk is None or filter_tk(tokens[i]):
                    return i
    return -1
def warning(fn, id_, message, index_kw_start, index_kw_end):
    """Report a style warning for a token range via callback *fn*.

    Uses Qt-Creator's task format when PRINT_QTC_TASKFORMAT is set,
    otherwise a gcc-style 'file:line: id: message' line, optionally
    followed by the offending source text when WARN_TEXT is enabled.

    NOTE(review): the scraped source lost the 'else:'/'if WARN_TEXT:'
    structure and emitted every format unconditionally; restored here
    (WARN_TEXT is defined at the top of the file and otherwise unused).
    """
    if PRINT_QTC_TASKFORMAT:
        fn("%s\t%d\t%s\t%s %s" % (filepath, tokens[index_kw_start].line, "comment", id_, message))
    else:
        fn("%s:%d: %s: %s" % (filepath, tokens[index_kw_start].line, id_, message))
        if WARN_TEXT:
            fn(tk_range_to_str(index_kw_start, index_kw_end, expand_tabs=True))
def warning_lineonly(fn, id_, message, line):
    """Report a style warning when only a line number (no token range) is known.

    NOTE(review): restored the 'else:' lost in the scrape so only one of the
    two output formats is emitted, mirroring warning().
    """
    if PRINT_QTC_TASKFORMAT:
        fn("%s\t%d\t%s\t%s %s" % (filepath, line, "comment", id_, message))
    else:
        fn("%s:%d: %s: %s" % (filepath, line, id_, message))

    # print(tk_range_to_str(index_kw_start, index_kw_end))
# ------------------------------------------------------------------
# Own Blender rules here!
def blender_check_kw_if(fn, index_kw_start, index_kw, index_kw_end):
    """Style checks around an if/for/while/switch keyword and its condition:
    spacing (E106/E107), brace placement (E108/E109/E103/E111), single-line
    body indentation (E200/E201) and multi-line condition indentation (E110).
    """

    # check if we have: 'if('
    if not tk_item_is_ws(tokens[index_kw + 1]):
        warning(fn, "E106", "no white space between '%s('" % tokens[index_kw].text, index_kw_start, index_kw_end)

    # check for: ){
    index_next = tk_advance_ws_newline(index_kw_end, 1)
    if tokens[index_next].type == Token.Punctuation and tokens[index_next].text == "{":
        if not tk_item_is_ws_newline(tokens[index_next - 1]):
            warning(fn, "E107", "no white space between trailing bracket '%s (){'" %
                    tokens[index_kw].text, index_kw_start, index_kw_end)

        # check for: if ()
        #            {
        # note: if the if statement is multi-line we allow it
        if ((tokens[index_kw].line == tokens[index_kw_end].line) and
                (tokens[index_kw].line == tokens[index_next].line - 1)):

            # allow a preprocessor split between the ')' and '{'
            if ((tokens[index_kw].line + 1 != tokens[index_next].line) and
                (tk_range_find_by_type(index_kw + 1, index_next - 1, Token.Comment.Preproc,
                                       filter_tk=lambda tk: tk.text in {
                                           "if", "ifdef", "ifndef", "else", "elif", "endif"}) != -1)):
                # allow this to go unnoticed
                pass
            else:
                warning(fn, "E108", "if body brace on a new line '%s ()\\n{'" %
                        tokens[index_kw].text, index_kw, index_kw_end)
    else:
        # no '{' on a multi-line if
        if tokens[index_kw].line != tokens[index_kw_end].line:
            # double check this is not...
            # if (a &&
            #     b); <--
            #
            # While possible but not common for 'if' statements, it's used in this example:
            #
            # do {
            #     foo;
            # } while(a &&
            #         b);
            #
            if not (tokens[index_next].type == Token.Punctuation and tokens[index_next].text == ";"):
                warning(fn, "E109", "multi-line if should use a brace '%s (\\n\\n) statement;'" %
                        tokens[index_kw].text, index_kw, index_kw_end)

        # check for correct single line use & indentation
        if not (tokens[index_next].type == Token.Punctuation and tokens[index_next].text == ";"):
            if tokens[index_next].type == Token.Keyword and tokens[index_next].text in {"if", "while", "for"}:
                ws_kw = extract_ws_indent(index_kw)
                ws_end = extract_ws_indent(index_next)
                if len(ws_kw) + 1 != len(ws_end):
                    # check for:
                    # #ifdef FOO
                    #     if (a)
                    # #else
                    #     if (b)
                    # #endif
                    #     { ...
                    #
                    has_sep = False
                    if len(ws_kw) == len(ws_end):
                        for i in range(index_kw + 1, index_next):
                            if tokens[i].type == Token.Comment.Preproc:
                                has_sep = True
                                break
                    if not has_sep:
                        warning(fn, "E200", "bad single line indent '%s (...) {'" %
                                tokens[index_kw].text, index_kw, index_next)
                    del has_sep
            else:
                index_end = tk_advance_to_token(index_next, 1, ";", Token.Punctuation)
                if tokens[index_kw].line != tokens[index_end].line:
                    # check for:
                    # if (a)
                    # b;
                    #
                    # should be:
                    #
                    # if (a)
                    #     b;
                    ws_kw = extract_ws_indent(index_kw)
                    ws_end = extract_ws_indent(index_end)
                    if len(ws_kw) + 1 != len(ws_end):
                        warning(fn, "E201", "bad single line indent '%s (...) {'" %
                                tokens[index_kw].text, index_kw, index_end)
                    del ws_kw, ws_end
                del index_end

    # multi-line statement
    if (tokens[index_kw].line != tokens[index_kw_end].line):
        # check for: if (a &&
        #                b) { ...
        # brace should be on a newline.
        #
        if tokens[index_kw_end].line == tokens[index_next].line:
            if not (tokens[index_next].type == Token.Punctuation and tokens[index_next].text == ";"):
                warning(fn, "E103", "multi-line should use a on a new line '%s (\\n\\n) {'" %
                        tokens[index_kw].text, index_kw, index_kw_end)

        # Note: this could be split into its own function
        # since it's not specific to if-statements,
        # can also work for function calls.
        #
        # check indentation on a multi-line statement:
        # if (a &&
        # b)
        # {
        #
        # should be:
        # if (a &&
        #     b)
        # {

        # Skip the first token
        # Extract ' if (' then convert to
        #         '     ' and check lines for correct indent.
        index_kw_bracket = tk_advance_ws_newline(index_kw, 1)
        ws_indent = extract_to_linestart(index_kw_bracket + 1)
        ws_indent = "".join([("\t" if c == "\t" else " ") for c in ws_indent])
        l_last = tokens[index_kw].line
        for i in range(index_kw + 1, index_kw_end + 1):
            if tokens[i].line != l_last:
                l_last = tokens[i].line
                # ignore blank lines
                if tokens[i].text == "\n":
                    pass
                elif tokens[i].text.startswith("#"):
                    pass
                else:
                    # check indentation is good
                    # use startswith because there are function calls within 'if' checks sometimes.
                    ws_indent_test = extract_to_linestart(i + 1)
                    # print("indent: %r %s" % (ws_indent_test, tokens[i].text))
                    if ws_indent_test.startswith(ws_indent):
                        pass
                    elif tokens[i].text.startswith(ws_indent):
                        # needed for some comments
                        pass
                    else:
                        warning(fn, "E110", "if body brace mult-line indent mismatch", i, i)
        del index_kw_bracket
        del ws_indent
        del l_last

    # check for: if () { ... };
    #
    # no need to have semicolon after brace.
    if tokens[index_next].text == "{":
        index_final = tk_match_backet(index_next)
        index_final_step = tk_advance_no_ws(index_final, 1)
        if tokens[index_final_step].text == ";":
            warning(fn, "E111", "semi-colon after brace '%s () { ... };'" %
                    tokens[index_kw].text, index_final_step, index_final_step)
def blender_check_kw_else(fn, index_kw):
    """Style checks around an 'else': spacing, brace placement, 'else if'
    splitting and '} else' cuddling (E112-E116).

    NOTE(review): the scraped source lost the 'if ((' opener of the E113
    guard, kept two diff-versions of the E115 guard, and dropped the 'else:'
    before the E115 warning; all three are reconstructed here.
    """
    # for 'else if' use the if check.
    i_next = tk_advance_ws_newline(index_kw, 1)

    # check there is at least one space between:
    # else{
    if index_kw + 1 == i_next:
        warning(fn, "E112", "else has no space between following brace 'else{'", index_kw, i_next)

    # check if there are more than 1 spaces after else, but nothing after the following brace
    # else {
    #     ...
    #
    # check for this case since this is needed sometimes:
    # else { a = 1; }
    if ((tokens[index_kw + 1].type == Token.Text) and
            (len(tokens[index_kw + 1].text) > 1) and
            (tokens[index_kw + 1].text.isspace())):
        # check if the next data after { is on a newline
        i_next_next = tk_advance_ws_newline(i_next, 1)
        if tokens[i_next].line != tokens[i_next_next].line:
            warning(fn, "E113", "unneeded whitespace before brace 'else ... {'", index_kw, i_next)

    # this check only tests for:
    # else
    # {
    # ... which is never OK
    #
    # ... except if you have
    # else
    # #preprocessor
    # {
    if tokens[i_next].type == Token.Punctuation and tokens[i_next].text == "{":
        if tokens[index_kw].line < tokens[i_next].line:
            # check for preproc
            i_newline = tk_advance_line(index_kw, 1)
            if tokens[i_newline].text.startswith("#"):
                pass
            else:
                warning(fn, "E114", "else body brace on a new line 'else\\n{'", index_kw, i_next)

    # this check only tests for:
    # else
    # if
    # ... which is never OK
    if tokens[i_next].type == Token.Keyword and tokens[i_next].text == "if":
        if tokens[index_kw].line < tokens[i_next].line:
            # allow for:
            # ....
            # else
            # #endif
            # if
            if ((tokens[index_kw].line + 1 != tokens[i_next].line) and
                any(True for i in range(index_kw + 1, i_next)
                    if (tokens[i].type == Token.Comment.Preproc and
                        tokens[i].text.lstrip("# \t").startswith((
                            "if", "ifdef", "ifndef",
                            "else", "elif", "endif",
                        ))
                        )
                    )):
                # allow this to go unnoticed
                pass
            else:
                warning(fn, "E115", "else if is split by a new line 'else\\nif'", index_kw, i_next)

    # check
    # } else
    # ... which is never OK
    i_prev = tk_advance_no_ws(index_kw, -1)
    if tokens[i_prev].type == Token.Punctuation and tokens[i_prev].text == "}":
        if tokens[index_kw].line == tokens[i_prev].line:
            warning(fn, "E116", "else has no newline before the brace '} else'", i_prev, index_kw)
def blender_check_kw_switch(fn, index_kw_start, index_kw, index_kw_end):
    """Check a switch statement's body: case/break/... indentation (E117),
    'case X :' spacing (E132), and switch placement (E119/E120).

    NOTE(review): the scraped source lost the closing '}' of the ws_test
    dict and the 'else:' branches carrying the E119/E120 warnings; both
    are reconstructed here.
    """
    # In this function we check the body of the switch
    # switch (value) {
    # ...
    # }

    # assert(tokens[index_kw].text == "switch")

    index_next = tk_advance_ws_newline(index_kw_end, 1)

    if tokens[index_next].type == Token.Punctuation and tokens[index_next].text == "{":
        ws_switch_indent = extract_to_linestart(index_kw)

        if ws_switch_indent.isspace():
            # 'case' should have at least 1 indent.
            # otherwise expect 2 indent (or more, for nested switches)
            ws_test = {
                "case": ws_switch_indent + "\t",
                "default:": ws_switch_indent + "\t",
                "break": ws_switch_indent + "\t\t",
                "return": ws_switch_indent + "\t\t",
                "continue": ws_switch_indent + "\t\t",
                "goto": ws_switch_indent + "\t\t",
            }

            index_final = tk_match_backet(index_next)

            for i in range(index_next + 1, index_final):
                # 'default' is seen as a label
                # print(tokens[i].type, tokens[i].text)
                if tokens[i].type in {Token.Keyword, Token.Name.Label}:
                    if tokens[i].text in {"case", "default:", "break", "return", "continue", "goto"}:
                        ws_other_indent = extract_to_linestart(i)
                        # non ws start - we ignore for now, allow case A: case B: ...
                        if ws_other_indent.isspace():
                            ws_test_other = ws_test[tokens[i].text]
                            if not ws_other_indent.startswith(ws_test_other):
                                warning(fn, "E117", "%s is not indented enough" % tokens[i].text, i, i)

                            if tokens[i].text == "case":
                                # while where here, check:
                                # case ABC :
                                # should be...
                                # case ABC:
                                # Note, this might be either 'Punctuation' or 'Operator', we need to check both.
                                i_case = tk_advance_line_to_token_with_fn(
                                    i, 1, ":",
                                    lambda t: t.type in {Token.Punctuation, Token.Operator})
                                # can be None when the identifier isn't an 'int'
                                if i_case is not None:
                                    if tokens[i_case - 1].text.isspace():
                                        warning(fn, "E132", "%s space before colon" % tokens[i].text, i, i_case)
        else:
            warning(fn, "E119", "switch isn't the first token in the line", index_kw_start, index_kw_end)
    else:
        warning(fn, "E120", "switch brace missing", index_kw_start, index_kw_end)
def blender_check_kw_sizeof(fn, index_kw):
    """Warn (E121) unless the keyword is immediately followed by '('."""
    following = tokens[index_kw + 1]
    if following.text != "(":
        warning(fn, "E121", "expected '%s('" % tokens[index_kw].text, index_kw, index_kw + 1)
def blender_check_cast(fn, index_kw_start, index_kw_end):
    """Spacing checks inside a cast's brackets (E122-E124)."""
    # detect: '( float...'
    if tokens[index_kw_start + 1].text.isspace():
        warning(fn, "E122", "cast has space after first bracket '( type...'", index_kw_start, index_kw_end)
    # detect: '...float )'
    if tokens[index_kw_end - 1].text.isspace():
        warning(fn, "E123", "cast has space before last bracket '... )'", index_kw_start, index_kw_end)
    # detect no space before operator: '(float*)'
    for i in range(index_kw_start + 1, index_kw_end):
        if tokens[i].text == "*":
            # allow: '(*)'
            if tokens[i - 1].type == Token.Punctuation:
                pass
            elif tokens[i - 1].text.isspace():
                pass
            else:
                warning(fn, "E124", "cast has no preceding whitespace '(type*)'", index_kw_start, index_kw_end)
def blender_check_comma(fn, index_kw):
    """Whitespace checks around a ',' (E125 missing space after,
    E126 space before).
    """
    i_next = tk_advance_ws_newline(index_kw, 1)

    # check there is at least one space between:
    # ,sometext
    if index_kw + 1 == i_next:
        # allow: (struct FooBar){ .a, .b, .c,}
        if tokens[i_next].text != "}":
            warning(fn, "E125", "comma has no space after it ',sometext'", index_kw, i_next)

    if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.isspace():
        warning(fn, "E126", "comma space before it 'sometext ,", index_kw, i_next)
def blender_check_period(fn, index_kw):
    """Whitespace checks around '.' member access (E127/E128).

    Skips '...' ellipsis runs and allows C99 designated initializers
    such as '{ .a = 1, .b = 2, }'.
    """
    # check we're now apart of ...
    if (tokens[index_kw - 1].text == ".") or (tokens[index_kw + 1].text == "."):
        return

    # 'a.b'
    if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.isspace():
        # C99 allows struct members to be declared as follows:
        # struct FooBar = { .a = 1, .b = 2, };
        # ... check for this case by allowing comma or brace beforehand.
        i_prev = tk_advance_ws_newline(index_kw - 1, -1)
        if tokens[i_prev].text not in {",", "{"}:
            warning(fn, "E127", "period space before it 'sometext .", index_kw, index_kw)
    if tokens[index_kw + 1].type == Token.Text and tokens[index_kw + 1].text.isspace():
        warning(fn, "E128", "period space after it '. sometext", index_kw, index_kw)
def _is_ws_pad(index_start, index_end):
    """True when the token range has whitespace on both its outer sides."""
    before = tokens[index_start - 1].text
    after = tokens[index_end + 1].text
    return before.isspace() and after.isspace()
def blender_check_operator(fn, index_start, index_end, op_text, is_cpp):
    """Whitespace / placement checks around an operator token run
    (E130, E143, and the E000.* 'unhandled operator' diagnostics).

    NOTE(review): several continuation lines of multi-line conditions were
    lost in the scraped diff view; lines marked 'reconstructed' were
    restored to their apparent upstream form — confirm against the
    original file.
    """
    if op_text == "->":
        # allow compiler to handle
        return

    if len(op_text) == 1:
        if op_text in {"+", "-"}:
            # detect (-a) vs (a - b)
            index_prev = index_start - 1
            if (tokens[index_prev].text.isspace() and
                    tokens[index_prev - 1].line == tokens[index_prev].line):  # reconstructed
                index_prev -= 1
            if tokens[index_prev].flag & IS_CAST:
                index_prev = tk_advance_flag(index_prev, -1, IS_CAST)

            if not (
                    tokens[index_prev].text.isspace() or
                    (tokens[index_prev].text in {"[", "(", "{"}) or
                    # Allow: (uint)-1
                    # Or: (struct FooBar)&var
                    (tokens[index_prev].flag & IS_CAST)
            ):
                warning(fn, "E130", "no space before operator '%s'" % op_text, index_start, index_end)
            if (not tokens[index_end + 1].text.isspace() and  # reconstructed
                    tokens[index_end + 1].text not in {"]", ")", "}"}):
                # TODO, needs work to be useful
                # warning(fn, "E130", "no space after operator '%s'" % op_text, index_start, index_end)
                pass

        elif op_text in {"/", "%", "^", "|", "=", "<", ">", "?"}:
            if not _is_ws_pad(index_start, index_end):
                if not (is_cpp and ("<" in op_text or ">" in op_text)):
                    warning(fn, "E130", "no space around operator '%s'" % op_text, index_start, index_end)
        elif op_text == ":":
            # check we're not
            # case 1:
            #
            # .. somehow 'case A:' doesn't suffer from this problem.
            #
            # note, it looks like this may be a quirk in pygments, how it handles 'default' too.
            if not (tokens[index_start - 1].text.isspace() or tokens[index_start - 1].text == "default"):
                i_case = tk_advance_line_to_token(index_start, -1, "case", Token.Keyword)
                if i_case is None:
                    warning(fn, "E130", "no space around operator '%s'" % op_text, index_start, index_end)
        elif op_text == "&":
            pass  # TODO, check if this is a pointer reference or not
        elif op_text == "*":
            index_prev = index_start - 1
            if (tokens[index_prev].text.isspace() and
                    tokens[index_prev - 1].line == tokens[index_prev].line):  # reconstructed
                index_prev -= 1
            if tokens[index_prev].flag & IS_CAST:
                index_prev = tk_advance_flag(index_prev, -1, IS_CAST)

            # This check could be improved, it's a bit fuzzy
            if ((tokens[index_start - 1].flag & IS_CAST) or  # reconstructed
                    (tokens[index_start + 1].flag & IS_CAST)):
                # allow:
                #     a = *(int *)b;
                # and:
                #     b = (int *)*b;
                pass
            elif ((tokens[index_start - 1].type in Token.Number) or  # reconstructed
                  (tokens[index_start + 1].type in Token.Number)):
                warning(fn, "E130", "no space around operator '%s'" % op_text, index_start, index_end)
            elif not (tokens[index_start - 1].text.isspace() or tokens[index_start - 1].text in {"(", "[", "{"}):
                warning(fn, "E130", "no space before operator '%s'" % op_text, index_start, index_end)
    elif len(op_text) == 2:
        # todo, remove operator check from `if`
        if op_text in {"+=", "-=", "*=", "/=", "&=", "|=", "^=",
                       "&&", "||",
                       "==", "!=", "<=", ">=",
                       "<<", ">>",
                       "%=",
                       # not operators, pointer mix-ins
                       ">*", "<*", "-*", "+*", "=*", "/*", "%*", "^*", "|*",
                       }:
            if not _is_ws_pad(index_start, index_end):
                if not (is_cpp and ("<" in op_text or ">" in op_text)):
                    warning(fn, "E130", "no space around operator '%s'" % op_text, index_start, index_end)
        elif op_text in {"++", "--"}:
            pass  # TODO, figure out the side we are adding to!
            '''
            if (tokens[index_start - 1].text.isspace() or
                    tokens[index_end + 1].text.isspace()):
                warning(fn, "E130", "spaces surrounding operator '%s'" % op_text, index_start, index_end)
            '''
        elif op_text in {"!!", "!*"}:
            # operators we _dont_ want whitespace after (pointers mainly)
            # we can assume these are pointers
            if tokens[index_end + 1].text.isspace():
                warning(fn, "E130", "spaces after operator '%s'" % op_text, index_start, index_end)
        elif op_text == "**":
            pass  # handle below
        elif op_text == "::":
            pass  # C++, ignore for now
        elif op_text == ":!*":
            pass  # ignore for now
        elif op_text == "*>":
            pass  # ignore for now, C++ <Class *>
        else:
            warning(fn, "E000.0", "unhandled operator 2 '%s'" % op_text, index_start, index_end)
    elif len(op_text) == 3:
        if op_text in {">>=", "<<="}:
            if not _is_ws_pad(index_start, index_end):
                if not (is_cpp and ("<" in op_text or ">" in op_text)):
                    warning(fn, "E130", "no space around operator '%s'" % op_text, index_start, index_end)
        elif op_text == "***":
            pass
        elif op_text in {"*--", "*++"}:
            pass
        elif op_text in {"--*", "++*"}:
            pass
        elif op_text == ">::":
            pass
        elif op_text == "::~":
            pass
        else:
            warning(fn, "E000.1", "unhandled operator 3 '%s'" % op_text, index_start, index_end)
    elif len(op_text) == 4:
        if op_text == "*>::":
            pass
        else:
            warning(fn, "E000.2", "unhandled operator 4 '%s'" % op_text, index_start, index_end)
    else:  # reconstructed
        warning(fn, "E000.3", "unhandled operator (len > 4) '%s'" % op_text, index_start, index_end)

    if len(op_text) > 1:
        if op_text[0] == "*" and op_text[-1] == "*":
            if ((not tokens[index_start - 1].text.isspace()) and  # reconstructed
                    (not tokens[index_start - 1].type == Token.Punctuation)):
                warning(fn, "E130", "no space before pointer operator '%s'" % op_text, index_start, index_end)
            if tokens[index_end + 1].text.isspace():
                warning(fn, "E130", "space before pointer operator '%s'" % op_text, index_start, index_end)

    # check if we are first in the line
    if op_text[0] == "!":
        # if (a &&
        #     !b)
        pass
    elif op_text[0] == "*" and tokens[index_start + 1].text.isspace() is False:
        pass  # *a = b
    elif len(op_text) == 1 and op_text[0] == "-" and tokens[index_start + 1].text.isspace() is False:
        pass  # -1
    elif len(op_text) == 2 and op_text == "++" and tokens[index_start + 1].text.isspace() is False:
        pass  # ++a
    elif len(op_text) == 2 and op_text == "--" and tokens[index_start + 1].text.isspace() is False:
        pass  # --a
    elif len(op_text) == 1 and op_text[0] == "&":
        # if (a &&
        #     &b)
        pass
    elif len(op_text) == 1 and op_text[0] == "~":
        # C++
        # ~ClassName
        pass
    elif len(op_text) == 1 and op_text[0] == "?":
        # (a == b)
        # ? c : d
        pass
    elif len(op_text) == 1 and op_text[0] == ":":
        # a = b ? c
        #       : d
        pass
    else:
        if tk_index_is_linestart(index_start):
            warning(fn, "E143", "operator starts a new line '%s'" % op_text, index_start, index_end)
def blender_check_linelength(fn, index_start, index_end, length):
    """Emit E144 for every physical line in the token range longer than LIN_SIZE."""
    if length > LIN_SIZE:
        expanded = tk_range_to_str(index_start, index_end, expand_tabs=True)
        for line_text in expanded.split("\n"):
            if len(line_text) > LIN_SIZE:
                warning(fn, "E144", "line length %d > %d" % (len(line_text), LIN_SIZE), index_start, index_end)
def blender_check_function_definition(fn, i):
# Warning, this is a fairly slow check and guesses
# based on some fuzzy rules
# assert(tokens[index].text == "{")
# check function declaration is not:
# 'void myfunc() {'
# ... other uses are handled by checks for statements
# this check is rather simplistic but tends to work well enough.
i_prev = i - 1
while tokens[i_prev].text == "":
i_prev -= 1
# ensure this isn't '{' in its own line
if tokens[i_prev].line == tokens[i].line:
# check we '}' isn't on same line...
i_next = i + 1
found = False
while tokens[i_next].line == tokens[i].line:
if tokens[i_next].text == "}":
found = True
break
i_next += 1
del i_next
if found is False:
# First check this isn't an assignment
i_prev = tk_advance_no_ws(i, -1)
# avoid '= {'