Method: pylint.checkers.strings.StringConstantChecker.process_tokens
Calls: 1120, Exceptions: 0, Paths: 4
Path 1: 814 calls (0.73), argument tokens: list (814)
Path 2: 249 calls (0.22), argument tokens: list (249)
Path 3: 55 calls (0.05), argument tokens: list (55)
Path 4: 2 calls (0.00), argument tokens: list (2)

Method source (identical across all four paths):
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
    encoding = "ascii"
    for i, (token_type, token, start, _, line) in enumerate(tokens):
        if token_type == tokenize.ENCODING:
            # this is always the first token processed
            encoding = token
        elif token_type == tokenize.STRING:
            # 'token' is the whole un-parsed token; we can look at the start
            # of it to see whether it's a raw or unicode string etc.
            self.process_string_token(token, start[0], start[1])
            # We figure the next token, ignoring comments & newlines:
            j = i + 1
            while j < len(tokens) and tokens[j].type in (
                tokenize.NEWLINE,
                tokenize.NL,
                tokenize.COMMENT,
            ):
                j += 1
            next_token = tokens[j] if j < len(tokens) else None
            if encoding != "ascii":
                # We convert `tokenize` character count into a byte count,
                # to match with astroid `.col_offset`
                start = (start[0], len(line[: start[1]].encode(encoding)))
            self.string_tokens[start] = (str_eval(token), next_token)

    if self.linter.config.check_quote_consistency:
        self.check_for_consistent_string_delimiters(tokens)
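
For context, the tokens argument is the module's token stream from Python's tokenize module. A minimal sketch (illustrative sample source, not from the profiled run) showing that tokenize.tokenize() over a bytes readline emits the ENCODING token first, which is what the first branch above relies on:

import io
import tokenize

# Illustrative only: build the kind of token list process_tokens() receives.
source = b's = "hello"  # a string token followed by a comment\n'
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
for tok in tokens:
    print(tokenize.tok_name[tok.type], repr(tok.string))
# First line printed: ENCODING 'utf-8'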
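
The while loop's look-ahead skips tokens that carry no syntax (logical newlines, in-expression newlines, comments) so each string can be paired with the next significant token, for example across an implicit string concatenation. A sketch of that skip logic in isolation; next_significant is a hypothetical helper name, not part of the profiled code:

import io
import tokenize

def next_significant(tokens, i):
    # Same skip set as the loop above: NEWLINE, NL and COMMENT.
    j = i + 1
    while j < len(tokens) and tokens[j].type in (
        tokenize.NEWLINE,
        tokenize.NL,
        tokenize.COMMENT,
    ):
        j += 1
    return tokens[j] if j < len(tokens) else None

source = b'x = ("a"  # comment\n     "b")\n'
toks = list(tokenize.tokenize(io.BytesIO(source).readline))
for i, tok in enumerate(toks):
    if tok.type == tokenize.STRING:
        print(repr(tok.string), "->", repr(next_significant(toks, i).string))
# '"a"' -> '"b"'   (the comment and line break are skipped)
# '"b"' -> ')'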
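
The encoding branch converts tokenize's character-based column into a byte count so the dictionary key lines up with astroid's byte-based .col_offset. A standalone illustration (made-up line, not from the report) of why the two diverge once a multi-byte character precedes the token:

line = 'héllo = "wörld"'
char_col = 8  # column at which tokenize reports the string starting
byte_col = len(line[:char_col].encode("utf-8"))
print(char_col, byte_col)  # prints: 8 9  ('é' is two bytes in UTF-8)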
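
The trailing check runs only when quote-consistency checking is enabled; the check_quote_consistency attribute corresponds to pylint's check-quote-consistency option, which is off by default. A sketch of enabling it in a .pylintrc, assuming the standard section name for the string checker:

[STRING]
check-quote-consistency=yes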