Speed up blib2to3 tokenization using startswith with a tuple (#4541)

This commit is contained in:
Tony Wang 2024-12-30 09:17:50 +08:00 committed by GitHub
parent 9431e98522
commit fdabd424e2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 3 additions and 2 deletions

View File

@@ -43,6 +43,7 @@
### Performance
<!-- Changes that improve Black's performance. -->
+- Speed up the `is_fstring_start` function in Black's tokenizer (#4541)
### Output

View File

@@ -221,7 +221,7 @@ def _combinations(*l: str) -> set[str]:
| {f"{prefix}'" for prefix in _strprefixes | _fstring_prefixes}
| {f'{prefix}"' for prefix in _strprefixes | _fstring_prefixes}
)
-fstring_prefix: Final = (
+fstring_prefix: Final = tuple(
{f"{prefix}'" for prefix in _fstring_prefixes}
| {f'{prefix}"' for prefix in _fstring_prefixes}
| {f"{prefix}'''" for prefix in _fstring_prefixes}
@@ -459,7 +459,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> str:
def is_fstring_start(token: str) -> bool:
-    return builtins.any(token.startswith(prefix) for prefix in fstring_prefix)
+    return token.startswith(fstring_prefix)
def _split_fstring_start_and_middle(token: str) -> tuple[str, str]: