Method: pylint.checkers.format.FormatChecker.process_tokens
Calls: 1113, Exceptions: 1, Paths: 16
Path 1: 652 calls (0.59)
list (652)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 2: 112 calls (0.1)
list (112)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 3: 95 calls (0.09)
list (95)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 4: 92 calls (0.08)
list (92)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 5: 58 calls (0.05)
list (58)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 6: 25 calls (0.02)
list (25)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 7: 18 calls (0.02)
list (18)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 8: 17 calls (0.02)
list (17)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 9: 13 calls (0.01)
list (13)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Walk the token stream and emit formatting-related messages.

    Checks performed here include line-ending consistency, indentation
    levels, the obsolete lowercase ``l`` numeric suffix, keywords followed
    by superfluous parentheses, module length, and trailing blank lines.
    """
    indent_stack = [0]  # one entry per currently open indentation level
    pending_indent_check = False  # set after NEWLINE, consumed by next statement
    cur_line = 0
    last_blank_line = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    for index, (tok_type, tok_string, tok_start, _, tok_line) in enumerate(tokens):
        if tok_start[0] != cur_line:
            cur_line = tok_start[0]
            # Tokenizer quirk: when an indented line holds a multi-line
            # docstring, the INDENT token's ``line`` member is truncated,
            # so look at the following token on that line instead.
            end = index + 1 if tok_type == tokenize.INDENT else index
            self.new_line(TokenWrapper(tokens), index - 1, end)

        if tok_type == tokenize.NEWLINE:
            # A program statement (or ENDMARKER) eventually follows after
            # (NL | COMMENT)* (INDENT | DEDENT+)?; an intervening INDENT
            # cancels this flag again.
            pending_indent_check = True
            self._check_line_ending(tok_string, cur_line)
        elif tok_type == tokenize.INDENT:
            pending_indent_check = False
            self.check_indent_level(tok_string, indent_stack[-1] + 1, cur_line)
            indent_stack.append(indent_stack[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # Nothing to verify per DEDENT; once the run of DEDENTs ends,
            # the statement that triggered it must match the stack top.
            pending_indent_check = True
            if len(indent_stack) > 1:
                indent_stack.pop()
        elif tok_type == tokenize.NL:
            if not tok_line.strip("\r\n"):
                last_blank_line = cur_line
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # First concrete token of the next statement, or an ENDMARKER
            # whose empty ``line`` matches the seeded indent level of 0;
            # ``tok_line`` exposes the statement's leading whitespace.
            if pending_indent_check:
                pending_indent_check = False
                self.check_indent_level(tok_line, indent_stack[-1], cur_line)

        if tok_type == tokenize.NUMBER and tok_string.endswith("l"):
            self.add_message("lowercase-l-suffix", line=cur_line)

        if tok_string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, index)

    cur_line -= 1  # align the count with ``wc -l``
    if cur_line > self.linter.config.max_module_lines:
        # Report at the line where too-many-lines (or its numeric message
        # id) was toggled by a pragma, defaulting to line 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        lineno = 1
        for name in (message_definition.msgid, "too-many-lines"):
            pragma_line = self.linter._pragma_lineno.get(name)
            if pragma_line:
                lineno = pragma_line
                break
        self.add_message(
            "too-many-lines",
            args=(cur_line, self.linter.config.max_module_lines),
            line=lineno,
        )

    # Trailing blank lines at EOF; empty files (e.g. __init__.py) pass.
    if cur_line == last_blank_line and cur_line > 0:
        self.add_message("trailing-newlines", line=cur_line)
Path 10: 13 calls (0.01)
list (13)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 11: 8 calls (0.01)
list (8)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 12: 3 calls (0.0)
list (3)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 13: 2 calls (0.0)
list (2)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 14: 2 calls (0.0)
list (2)
GeneratorExit (1)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 15: 2 calls (0.0)
list (2)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)
Path 16: 1 calls (0.0)
list (1)
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    """Process tokens and search for:

    - too long lines (i.e. longer than <max_chars>)
    - optionally bad construct (if given, bad_construct must be a compiled
      regular expression).
    """
    indents = [0]  # stack of logical indent levels; seeded with module level 0
    check_equal = False  # True when the next concrete token's indent must be verified
    line_num = 0
    self._lines = {}
    self._visited_lines = {}
    self._last_line_ending: str | None = None
    last_blank_line_num = 0
    # The wrapper only holds a reference to the token list, which is complete
    # before the loop starts — build it once instead of on every new line
    # (hoists a loop-invariant allocation out of the per-token loop).
    token_wrapper = TokenWrapper(tokens)
    for idx, (tok_type, string, start, _, line) in enumerate(tokens):
        if start[0] != line_num:
            line_num = start[0]
            # A tokenizer oddity: if an indented line contains a multi-line
            # docstring, the line member of the INDENT token does not contain
            # the full line; therefore we check the next token on the line.
            if tok_type == tokenize.INDENT:
                self.new_line(token_wrapper, idx - 1, idx + 1)
            else:
                self.new_line(token_wrapper, idx - 1, idx)

        if tok_type == tokenize.NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = True
            self._check_line_ending(string, line_num)
        elif tok_type == tokenize.INDENT:
            check_equal = False
            self.check_indent_level(string, indents[-1] + 1, line_num)
            indents.append(indents[-1] + 1)
        elif tok_type == tokenize.DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            check_equal = True
            if len(indents) > 1:
                del indents[-1]
        elif tok_type == tokenize.NL:
            # A non-logical newline: remember it if the line was blank so we
            # can flag trailing newlines at end of file.
            if not line.strip("\r\n"):
                last_blank_line_num = line_num
        elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
            # This is the first concrete token following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading white-space
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            if check_equal:
                check_equal = False
                self.check_indent_level(line, indents[-1], line_num)

        if tok_type == tokenize.NUMBER and string.endswith("l"):
            # Python 2 style long-integer suffix (e.g. 10l).
            self.add_message("lowercase-l-suffix", line=line_num)

        if string in _KEYWORD_TOKENS:
            self._check_keyword_parentheses(tokens, idx)

    line_num -= 1  # to be ok with "wc -l"
    if line_num > self.linter.config.max_module_lines:
        # Get the line where the too-many-lines (or its message id)
        # was disabled or default to 1.
        message_definition = self.linter.msgs_store.get_message_definitions(
            "too-many-lines"
        )[0]
        names = (message_definition.msgid, "too-many-lines")
        lineno = next(
            filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
            1,
        )
        self.add_message(
            "too-many-lines",
            args=(line_num, self.linter.config.max_module_lines),
            line=lineno,
        )

    # See if there are any trailing lines. Do not complain about empty
    # files like __init__.py markers.
    if line_num == last_blank_line_num and line_num > 0:
        self.add_message("trailing-newlines", line=line_num)