Path 1: 3246 calls (0.55)

"''" (99) '"hi"' (65) '""' (57) '"a"' (54) "'1'" (53) "'TEST'" (51) '"http://localhost"' (50) "'a'" (44) '"utf-8"' (42) '"b"' (36)

Returns: '' (156) 'a' (98) 'hi' (71) 'b' (68) '1' (61) 'foo' (56) 'TEST' (54) 'key' (50) 'http://localhost' (50) '2' (48)

def str_eval(token: str) -> str:
    """Mostly replicate `ast.literal_eval(token)` manually to avoid any performance hit.

    This supports f-strings, contrary to `ast.literal_eval`.
    We have to support all string literal notations:
    https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
    """
    if token[0:2].lower() in {"fr", "rf"}:  # two-character prefix (fr/rf, any casing)
        token = token[2:]
    elif token[0].lower() in {"r", "u", "f"}:  # one-character prefix
        token = token[1:]
    if token[0:3] in {'"""', "'''"}:  # triple-quoted literal
        return token[3:-3]
    return token[1:-1]
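Judging by the samples, this path takes neither prefix branch nor the triple-quote branch: the tokens are plain single- or double-quoted literals, so the function falls through to `token[1:-1]`. A quick sanity check with inputs taken from the samples above, assuming the `str_eval` shown here is in scope:

    assert str_eval('"hi"') == "hi"
    assert str_eval("'TEST'") == "TEST"
    assert str_eval("''") == ""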

Path 2: 2519 calls (0.43)

'"""Perfect module with only documentation for configuration tests."""' (50) '"""docstring"""' (33) '"""hehehe"""' (22) '""""\n pylint score: +7.5...

Returns: 'Perfect module with only documentation for configuration tests.' (50) 'docstring' (33) 'hehehe' (22) '"\n pylint score: +7.50\n' (20) '\n Pyli...

(Function body identical to the copy shown under Path 1.)
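Every token here is an unprefixed triple-quoted docstring: both prefix checks fail, `token[0:3]` matches, and the function returns `token[3:-3]`. For example, again assuming `str_eval` from above:

    assert str_eval('"""docstring"""') == "docstring"
    assert str_eval('"""hehehe"""') == "hehehe"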

Path 3: 126 calls (0.02)

'f"Eating a fruit named {fruit_name} with {condiment}"' (4) 'u"String"' (4) 'f"{position[0]},{position[1]}"' (4) 'f"{THING1}, plus {THING2}"' (2) 'f"S...

Returns: 'X' (6) 'String' (5) 'Eating a fruit named {fruit_name} with {condiment}' (4) '{position[0]},{position[1]}' (4) '{THING1}, plus {THING2}' (2) 'Selecte...

(Function body identical to the copy shown under Path 1.)
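These tokens carry a single-character prefix (`f`, `u`, or `r`), so the `elif` branch strips one character before the usual `token[1:-1]` slice. Note that f-string placeholders come back verbatim rather than interpolated, consistent with the docstring's claim of f-string support. Using inputs from the samples above:

    assert str_eval('u"String"') == "String"
    assert str_eval('f"{THING1}, plus {THING2}"') == "{THING1}, plus {THING2}"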

Path 4: 10 calls (0.0)

'r"""The Sphinx docstring\n In Sphinx docstrings asterisks should be escaped.\n See https://github.com/PyCQA/pylint/issues/5406\n\n :param na...

Returns: 'The Sphinx docstring\n In Sphinx docstrings asterisks should be escaped.\n See https://github.com/PyCQA/pylint/issues/5406\n\n :param named_...

(Function body identical to the copy shown under Path 1.)
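This rare path combines a one-character prefix with triple quotes, as in the raw Sphinx docstring above: the `elif` strips the `r`, then the triple-quote branch strips the quote triples, leaving any backslash escapes in the body untouched. A minimal analogous token (my own construction, not taken from the data above):

    assert str_eval('r"""The \\*raw\\* docstring"""') == "The \\*raw\\* docstring"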

Path 5: 8 calls (0.0)

'fr"X"' (1) 'Fr"X"' (1) 'fR"X"' (1) 'FR"X"' (1) 'rf"X"' (1) 'rF"X"' (1) 'Rf"X"' (1) 'RF"X"' (1)

Returns: 'X' (8)

(Function body identical to the copy shown under Path 1.)
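The eight calls here cover every casing of the two-character `fr`/`rf` prefix, which the first branch strips before the final `token[1:-1]`. A compact check, assuming `str_eval` from above:

    for prefix in ("fr", "Fr", "fR", "FR", "rf", "rF", "Rf", "RF"):
        assert str_eval(f'{prefix}"X"') == "X"

One caveat, since the docstring only claims to *mostly* replicate `ast.literal_eval`: escape sequences in the literal body are returned verbatim, not decoded. A small illustration (my own example, not from the data above):

    assert str_eval(r'"a\nb"') == "a\\nb"  # backslash + "n", not a newline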