Use builtin generics (#4458)
uvx ruff check --output-format concise src --target-version py39 --select UP006 --fix --unsafe-fixes
uvx ruff check --output-format concise src --target-version py39 --select F401 --fix

plus some manual fixups
parent 2a45cecf29
commit 8fb2add1f7
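
For context: ruff's UP006 rule (pyupgrade's "non-pep585-annotation") rewrites typing.List, typing.Dict, typing.Set, typing.Tuple, and friends into the PEP 585 builtin generics, which are valid in annotations from Python 3.9 onward; the follow-up F401 pass then removes the typing imports left unused. A minimal sketch of the kind of rewrite applied throughout this commit (hypothetical example code, not taken from Black itself):

    # Before (pre-PEP 585 typing aliases):
    #   from typing import Dict, List
    #   def count_words(lines: List[str]) -> Dict[str, int]: ...

    # After (builtin generics, runtime-valid on Python 3.9+).
    # Note that Optional and Union still come from typing at this target
    # version, which is why those imports survive in the diff below.
    def count_words(lines: list[str]) -> dict[str, int]:
        counts: dict[str, int] = {}
        for line in lines:
            for word in line.split():
                counts[word] = counts.get(word, 0) + 1
        return counts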
src/black/__init__.py
@@ -14,17 +14,13 @@
 from typing import (
     Any,
     Collection,
-    Dict,
     Generator,
     Iterator,
-    List,
     MutableMapping,
     Optional,
     Pattern,
     Sequence,
-    Set,
     Sized,
-    Tuple,
     Union,
 )

@@ -176,7 +172,7 @@ def read_pyproject_toml(
             "line-ranges", "Cannot use line-ranges in the pyproject.toml file."
         )

-    default_map: Dict[str, Any] = {}
+    default_map: dict[str, Any] = {}
     if ctx.default_map:
         default_map.update(ctx.default_map)
     default_map.update(config)
@@ -186,9 +182,9 @@ def read_pyproject_toml(


 def spellcheck_pyproject_toml_keys(
-    ctx: click.Context, config_keys: List[str], config_file_path: str
+    ctx: click.Context, config_keys: list[str], config_file_path: str
 ) -> None:
-    invalid_keys: List[str] = []
+    invalid_keys: list[str] = []
     available_config_options = {param.name for param in ctx.command.params}
     for key in config_keys:
         if key not in available_config_options:
@@ -202,8 +198,8 @@ def spellcheck_pyproject_toml_keys(


 def target_version_option_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[TargetVersion]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[TargetVersion]:
     """Compute the target versions from a --target-version flag.

     This is its own function because mypy couldn't infer the type correctly
@@ -213,8 +209,8 @@ def target_version_option_callback(


 def enable_unstable_feature_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[Preview]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[Preview]:
     """Compute the features from an --enable-unstable-feature flag."""
     return [Preview[val] for val in v]

@@ -519,7 +515,7 @@ def main(  # noqa: C901
     ctx: click.Context,
     code: Optional[str],
     line_length: int,
-    target_version: List[TargetVersion],
+    target_version: list[TargetVersion],
     check: bool,
     diff: bool,
     line_ranges: Sequence[str],
@@ -533,7 +529,7 @@ def main(  # noqa: C901
     skip_magic_trailing_comma: bool,
     preview: bool,
     unstable: bool,
-    enable_unstable_feature: List[Preview],
+    enable_unstable_feature: list[Preview],
     quiet: bool,
     verbose: bool,
     required_version: Optional[str],
@@ -543,7 +539,7 @@ def main(  # noqa: C901
     force_exclude: Optional[Pattern[str]],
     stdin_filename: Optional[str],
     workers: Optional[int],
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     config: Optional[str],
 ) -> None:
     """The uncompromising code formatter."""
@@ -643,7 +639,7 @@ def main(  # noqa: C901
         enabled_features=set(enable_unstable_feature),
     )

-    lines: List[Tuple[int, int]] = []
+    lines: list[tuple[int, int]] = []
     if line_ranges:
         if ipynb:
             err("Cannot use --line-ranges with ipynb files.")
@@ -733,7 +729,7 @@ def main(  # noqa: C901
 def get_sources(
     *,
     root: Path,
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     quiet: bool,
     verbose: bool,
     include: Pattern[str],
@@ -742,14 +738,14 @@ def get_sources(
     force_exclude: Optional[Pattern[str]],
     report: "Report",
     stdin_filename: Optional[str],
-) -> Set[Path]:
+) -> set[Path]:
     """Compute the set of files to be formatted."""
-    sources: Set[Path] = set()
+    sources: set[Path] = set()

     assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
     using_default_exclude = exclude is None
     exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
-    gitignore: Optional[Dict[Path, PathSpec]] = None
+    gitignore: Optional[dict[Path, PathSpec]] = None
     root_gitignore = get_gitignore(root)

     for s in src:
@@ -841,7 +837,7 @@ def reformat_code(
     mode: Mode,
     report: Report,
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """
     Reformat and print out `content` without spawning child processes.
@@ -874,7 +870,7 @@ def reformat_one(
     mode: Mode,
     report: "Report",
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Reformat a single file under `src` without spawning child processes.

@@ -930,7 +926,7 @@ def format_file_in_place(
     write_back: WriteBack = WriteBack.NO,
     lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file under `src` path. Return True if changed.

@@ -997,7 +993,7 @@ def format_stdin_to_stdout(
     content: Optional[str] = None,
     write_back: WriteBack = WriteBack.NO,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file on stdin. Return True if changed.

@@ -1048,7 +1044,7 @@ def check_stability_and_equivalence(
     dst_contents: str,
     *,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Perform stability and equivalence checks.

@@ -1065,7 +1061,7 @@ def format_file_contents(
     *,
     fast: bool,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> FileContent:
     """Reformat contents of a file and return new contents.

@@ -1196,7 +1192,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileCon


 def format_str(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     """Reformat a string and return new contents.

@@ -1243,10 +1239,10 @@ def f(


 def _format_str_once(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
-    dst_blocks: List[LinesBlock] = []
+    dst_blocks: list[LinesBlock] = []
     if mode.target_versions:
         versions = mode.target_versions
     else:
@@ -1296,7 +1292,7 @@ def _format_str_once(
     return "".join(dst_contents)


-def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:
     """Return a tuple of (decoded_contents, encoding, newline).

     `newline` is either CRLF or LF but `decoded_contents` is decoded with
@@ -1314,8 +1310,8 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:


 def get_features_used(  # noqa: C901
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[Feature]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[Feature]:
     """Return a set of (relatively) new Python features used in this file.

     Currently looking for:
@@ -1333,7 +1329,7 @@ def get_features_used(  # noqa: C901
     - except* clause;
     - variadic generics;
     """
-    features: Set[Feature] = set()
+    features: set[Feature] = set()
     if future_imports:
         features |= {
             FUTURE_FLAG_TO_FEATURE[future_import]
@@ -1471,8 +1467,8 @@ def _contains_asexpr(node: Union[Node, Leaf]) -> bool:


 def detect_target_versions(
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[TargetVersion]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[TargetVersion]:
     """Detect the version to target based on the nodes used."""
     features = get_features_used(node, future_imports=future_imports)
     return {
@@ -1480,11 +1476,11 @@ def detect_target_versions(
     }


-def get_future_imports(node: Node) -> Set[str]:
+def get_future_imports(node: Node) -> set[str]:
     """Return a set of __future__ imports in the file."""
-    imports: Set[str] = set()
+    imports: set[str] = set()

-    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+    def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
         for child in children:
             if isinstance(child, Leaf):
                 if child.type == token.NAME:
@@ -1571,7 +1567,7 @@ def assert_equivalent(src: str, dst: str) -> None:


 def assert_stable(
-    src: str, dst: str, mode: Mode, *, lines: Collection[Tuple[int, int]] = ()
+    src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
 ) -> None:
     """Raise AssertionError if `dst` reformats differently the second time."""
     if lines:
src/black/_width_table.py
@@ -1,9 +1,9 @@
 # Generated by make_width_table.py
 # wcwidth 0.2.6
 # Unicode 15.0.0
-from typing import Final, List, Tuple
+from typing import Final

-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
     (0, 0, 0),
     (1, 31, -1),
     (127, 159, -1),
src/black/brackets.py
@@ -1,7 +1,7 @@
 """Builds on top of nodes.py to track brackets."""

 from dataclasses import dataclass, field
-from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union
+from typing import Final, Iterable, Optional, Sequence, Union

 from black.nodes import (
     BRACKET,
@@ -60,12 +60,12 @@ class BracketTracker:
     """Keeps track of brackets on a line."""

     depth: int = 0
-    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
-    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
+    bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
+    delimiters: dict[LeafID, Priority] = field(default_factory=dict)
     previous: Optional[Leaf] = None
-    _for_loop_depths: List[int] = field(default_factory=list)
-    _lambda_argument_depths: List[int] = field(default_factory=list)
-    invisible: List[Leaf] = field(default_factory=list)
+    _for_loop_depths: list[int] = field(default_factory=list)
+    _lambda_argument_depths: list[int] = field(default_factory=list)
+    invisible: list[Leaf] = field(default_factory=list)

     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -353,7 +353,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
     return 0


-def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
+def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
     """Return leaves that are inside matching brackets.

     The input `leaves` can have non-matching brackets at the head or tail parts.
src/black/cache.py
@@ -7,7 +7,7 @@
 import tempfile
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Iterable, NamedTuple, Set, Tuple
+from typing import Iterable, NamedTuple

 from platformdirs import user_cache_dir

@@ -55,7 +55,7 @@ def get_cache_file(mode: Mode) -> Path:
 class Cache:
     mode: Mode
     cache_file: Path
-    file_data: Dict[str, FileData] = field(default_factory=dict)
+    file_data: dict[str, FileData] = field(default_factory=dict)

     @classmethod
     def read(cls, mode: Mode) -> Self:
@@ -76,7 +76,7 @@ def read(cls, mode: Mode) -> Self:

         with cache_file.open("rb") as fobj:
             try:
-                data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
+                data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
                 file_data = {k: FileData(*v) for k, v in data.items()}
             except (pickle.UnpicklingError, ValueError, IndexError):
                 return cls(mode, cache_file)
@@ -114,14 +114,14 @@ def is_changed(self, source: Path) -> bool:
                 return True
         return False

-    def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
         """Split an iterable of paths in `sources` into two sets.

         The first contains paths of files that modified on disk or are not in the
         cache. The other contains paths to non-modified files.
         """
-        changed: Set[Path] = set()
-        done: Set[Path] = set()
+        changed: set[Path] = set()
+        done: set[Path] = set()
         for src in sources:
             if self.is_changed(src):
                 changed.add(src)
@@ -140,7 +140,7 @@ def write(self, sources: Iterable[Path]) -> None:
                 dir=str(self.cache_file.parent), delete=False
             ) as f:
                 # We store raw tuples in the cache because it's faster.
                 data: Dict[str, Tuple[float, int, str]] = {
src/black/comments.py
@@ -1,7 +1,7 @@
 import re
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import Collection, Final, Iterator, List, Optional, Tuple, Union
+from typing import Collection, Final, Iterator, Optional, Union

 from black.mode import Mode, Preview
 from black.nodes import (
@@ -77,9 +77,9 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]:


 @lru_cache(maxsize=4096)
-def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
     """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
-    result: List[ProtoComment] = []
+    result: list[ProtoComment] = []
     if not prefix or "#" not in prefix:
         return result

@@ -166,7 +166,7 @@ def make_comment(content: str) -> str:


 def normalize_fmt_off(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> None:
     """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
     try_again = True
@@ -175,7 +175,7 @@ def normalize_fmt_off(


 def convert_one_fmt_off_pair(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> bool:
     """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.

@@ -336,7 +336,7 @@ def _generate_ignored_nodes_from_fmt_skip(
         # statements. The ignored nodes should be previous siblings of the
         # parent suite node.
         leaf.prefix = ""
-        ignored_nodes: List[LN] = []
+        ignored_nodes: list[LN] = []
         parent_sibling = parent.prev_sibling
         while parent_sibling is not None and parent_sibling.type != syms.suite:
             ignored_nodes.insert(0, parent_sibling)
@@ -376,7 +376,7 @@ def children_contains_fmt_on(container: LN) -> bool:
     return False


-def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
+def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
     """
     Returns:
         True iff one of the comments in @comment_list is a pragma used by one
src/black/concurrency.py
@@ -13,7 +13,7 @@
 from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
 from multiprocessing import Manager
 from pathlib import Path
-from typing import Any, Iterable, Optional, Set
+from typing import Any, Iterable, Optional

 from mypy_extensions import mypyc_attr

@@ -69,7 +69,7 @@ def shutdown(loop: asyncio.AbstractEventLoop) -> None:
 # not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
 @mypyc_attr(patchable=True)
 def reformat_many(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
@@ -119,7 +119,7 @@ def reformat_many(


 async def schedule_formatting(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
src/black/debug.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass, field
-from typing import Any, Iterator, List, TypeVar, Union
+from typing import Any, Iterator, TypeVar, Union

 from black.nodes import Visitor
 from black.output import out
@@ -14,7 +14,7 @@
 @dataclass
 class DebugVisitor(Visitor[T]):
     tree_depth: int = 0
-    list_output: List[str] = field(default_factory=list)
+    list_output: list[str] = field(default_factory=list)
     print_output: bool = True

     def out(self, message: str, *args: Any, **kwargs: Any) -> None:
src/black/files.py
@@ -6,14 +6,11 @@
 from typing import (
     TYPE_CHECKING,
     Any,
-    Dict,
     Iterable,
     Iterator,
-    List,
     Optional,
     Pattern,
     Sequence,
-    Tuple,
     Union,
 )

@@ -43,7 +40,7 @@


 @lru_cache
-def _load_toml(path: Union[Path, str]) -> Dict[str, Any]:
+def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
     with open(path, "rb") as f:
         return tomllib.load(f)

@@ -56,7 +53,7 @@ def _cached_resolve(path: Path) -> Path:
 @lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
-) -> Tuple[Path, str]:
+) -> tuple[Path, str]:
     """Return a directory containing .git, .hg, or pyproject.toml.

     pyproject.toml files are only considered if they contain a [tool.black]
@@ -106,7 +103,7 @@ def find_project_root(


 def find_pyproject_toml(
-    path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
+    path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
 ) -> Optional[str]:
     """Find the absolute filepath to a pyproject.toml if it exists"""
     path_project_root, _ = find_project_root(path_search_start, stdin_filename)
@@ -128,13 +125,13 @@ def find_pyproject_toml(


 @mypyc_attr(patchable=True)
-def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
+def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
     """Parse a pyproject toml file, pulling out relevant parts for Black.

     If parsing fails, will raise a tomllib.TOMLDecodeError.
     """
     pyproject_toml = _load_toml(path_config)
-    config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
+    config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
     config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}

     if "target_version" not in config:
@@ -146,8 +143,8 @@ def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:


 def infer_target_version(
-    pyproject_toml: Dict[str, Any],
-) -> Optional[List[TargetVersion]]:
+    pyproject_toml: dict[str, Any],
+) -> Optional[list[TargetVersion]]:
     """Infer Black's target version from the project metadata in pyproject.toml.

     Supports the PyPA standard format (PEP 621):
@@ -170,7 +167,7 @@ def infer_target_version(
     return None


-def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.

     If parsing fails, will raise a packaging.version.InvalidVersion error.
@@ -185,7 +182,7 @@ def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersio
     return None


-def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.

     If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
@@ -196,7 +193,7 @@ def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVers
         return None

     target_version_map = {f"3.{v.value}": v for v in TargetVersion}
-    compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
+    compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
     if compatible_versions:
         return [target_version_map[v] for v in compatible_versions]
     return None
@@ -251,7 +248,7 @@ def find_user_pyproject_toml() -> Path:
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
-    lines: List[str] = []
+    lines: list[str] = []
     if gitignore.is_file():
         with gitignore.open(encoding="utf-8") as gf:
             lines = gf.readlines()
@@ -302,7 +299,7 @@ def best_effort_relative_path(path: Path, root: Path) -> Path:
 def _path_is_ignored(
     root_relative_path: str,
     root: Path,
-    gitignore_dict: Dict[Path, PathSpec],
+    gitignore_dict: dict[Path, PathSpec],
 ) -> bool:
     path = root / root_relative_path
     # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
@@ -335,7 +332,7 @@ def gen_python_files(
     extend_exclude: Optional[Pattern[str]],
     force_exclude: Optional[Pattern[str]],
     report: Report,
-    gitignore_dict: Optional[Dict[Path, PathSpec]],
+    gitignore_dict: Optional[dict[Path, PathSpec]],
     *,
     verbose: bool,
     quiet: bool,
src/black/handle_ipynb_magics.py
@@ -7,7 +7,7 @@
 import sys
 from functools import lru_cache
 from importlib.util import find_spec
-from typing import Dict, List, Optional, Tuple
+from typing import Optional

 if sys.version_info >= (3, 10):
     from typing import TypeGuard
@@ -64,7 +64,7 @@ def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
     return installed


-def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
+def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
     """Remove trailing semicolon from Jupyter notebook cell.

     For example,
@@ -120,7 +120,7 @@ def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
     return str(tokens_to_src(tokens))


-def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
+def mask_cell(src: str) -> tuple[str, list[Replacement]]:
     """Mask IPython magics so content becomes parseable Python code.

     For example,
@@ -135,7 +135,7 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:

     The replacements are returned, along with the transformed code.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []
     try:
         ast.parse(src)
     except SyntaxError:
@@ -186,7 +186,7 @@ def get_token(src: str, magic: str) -> str:
     return f'"{token}"'


-def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace cell magic with token.

     Note that 'src' will already have been processed by IPython's
@@ -203,7 +203,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:

     The replacement, along with the transformed code, is returned.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []

     tree = ast.parse(src)

@@ -217,7 +217,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
     return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements


-def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace magics within body of cell.

     Note that 'src' will already have been processed by IPython's
@@ -258,7 +258,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
     return "\n".join(new_srcs), replacements


-def unmask_cell(src: str, replacements: List[Replacement]) -> str:
+def unmask_cell(src: str, replacements: list[Replacement]) -> str:
     """Remove replacements from cell.

     For example
@@ -291,7 +291,7 @@ def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
     )


-def _get_str_args(args: List[ast.expr]) -> List[str]:
+def _get_str_args(args: list[ast.expr]) -> list[str]:
     str_args = []
     for arg in args:
         assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
@@ -375,7 +375,7 @@ class MagicFinder(ast.NodeVisitor):
     """

     def __init__(self) -> None:
-        self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
+        self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)

     def visit_Assign(self, node: ast.Assign) -> None:
         """Look for system assign magics.
src/black/linegen.py
@@ -7,7 +7,7 @@
 from dataclasses import replace
 from enum import Enum, auto
 from functools import partial, wraps
-from typing import Collection, Iterator, List, Optional, Set, Union, cast
+from typing import Collection, Iterator, Optional, Union, cast

 from black.brackets import (
     COMMA_PRIORITY,
@@ -197,7 +197,7 @@ def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
         yield from self.line(-1)

     def visit_stmt(
-        self, node: Node, keywords: Set[str], parens: Set[str]
+        self, node: Node, keywords: set[str], parens: set[str]
     ) -> Iterator[Line]:
         """Visit a statement.

@@ -559,7 +559,7 @@ def __post_init__(self) -> None:
         self.current_line = Line(mode=self.mode)

         v = self.visit_stmt
-        Ø: Set[str] = set()
+        Ø: set[str] = set()
         self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
         self.visit_if_stmt = partial(
             v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
@@ -626,7 +626,7 @@ def transform_line(
     string_split = StringSplitter(ll, sn)
     string_paren_wrap = StringParenWrapper(ll, sn)

-    transformers: List[Transformer]
+    transformers: list[Transformer]
     if (
         not line.contains_uncollapsable_type_comments()
         and not line.should_split_rhs
@@ -726,7 +726,7 @@ def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
     """If a funcdef has a magic trailing comma in the return type, then we should first
     split the line with rhs to respect the comma.
     """
-    return_type_leaves: List[Leaf] = []
+    return_type_leaves: list[Leaf] = []
     in_return_type = False

     for leaf in line.leaves:
@@ -768,9 +768,9 @@ def left_hand_split(
     Prefer RHS otherwise. This is why this function is not symmetrical with
     :func:`right_hand_split` which also handles optional parentheses.
     """
-    tail_leaves: List[Leaf] = []
-    body_leaves: List[Leaf] = []
-    head_leaves: List[Leaf] = []
+    tail_leaves: list[Leaf] = []
+    body_leaves: list[Leaf] = []
+    head_leaves: list[Leaf] = []
     current_leaves = head_leaves
     matching_bracket: Optional[Leaf] = None
     for leaf in line.leaves:
@@ -836,9 +836,9 @@ def _first_right_hand_split(
     _maybe_split_omitting_optional_parens to get an opinion whether to prefer
     splitting on the right side of an assignment statement.
     """
-    tail_leaves: List[Leaf] = []
-    body_leaves: List[Leaf] = []
-    head_leaves: List[Leaf] = []
+    tail_leaves: list[Leaf] = []
+    body_leaves: list[Leaf] = []
+    head_leaves: list[Leaf] = []
     current_leaves = tail_leaves
     opening_bracket: Optional[Leaf] = None
     closing_bracket: Optional[Leaf] = None
@@ -869,8 +869,8 @@ def _first_right_hand_split(
         and tail_leaves[0].opening_bracket is head_leaves[-1]
     ):
         inner_body_leaves = list(body_leaves)
-        hugged_opening_leaves: List[Leaf] = []
-        hugged_closing_leaves: List[Leaf] = []
+        hugged_opening_leaves: list[Leaf] = []
+        hugged_closing_leaves: list[Leaf] = []
         is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
         unpacking_offset: int = 1 if is_unpacking else 0
         while (
@@ -1080,7 +1080,7 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None


 def _ensure_trailing_comma(
-    leaves: List[Leaf], original: Line, opening_bracket: Leaf
+    leaves: list[Leaf], original: Line, opening_bracket: Leaf
 ) -> bool:
     if not leaves:
         return False
@@ -1121,7 +1121,7 @@ def _ensure_trailing_comma(


 def bracket_split_build_line(
-    leaves: List[Leaf],
+    leaves: list[Leaf],
     original: Line,
     opening_bracket: Leaf,
     *,
@@ -1150,7 +1150,7 @@ def bracket_split_build_line(
                     leaves.insert(i + 1, new_comma)
                 break

-    leaves_to_track: Set[LeafID] = set()
+    leaves_to_track: set[LeafID] = set()
     if component is _BracketSplitComponent.head:
         leaves_to_track = get_leaves_inside_matching_brackets(leaves)
     # Populate the line
@@ -1342,7 +1342,7 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]:


 def normalize_invisible_parens(  # noqa: C901
-    node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature]
+    node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
 ) -> None:
     """Make existing optional parentheses invisible or create new ones.

@@ -1692,7 +1692,7 @@ def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
     )


-def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
+def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
     """Generate sets of closing bracket IDs that should be omitted in a RHS.

     Brackets can be omitted if the entire trailer up to and including
@@ -1703,14 +1703,14 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf
     the one that needs to explode are omitted.
     """

-    omit: Set[LeafID] = set()
+    omit: set[LeafID] = set()
     if not line.magic_trailing_comma:
         yield omit

     length = 4 * line.depth
     opening_bracket: Optional[Leaf] = None
     closing_bracket: Optional[Leaf] = None
-    inner_brackets: Set[LeafID] = set()
+    inner_brackets: set[LeafID] = set()
     for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
         length += leaf_length
         if length > line_length:
@@ -1775,10 +1775,10 @@ def run_transformer(
     features: Collection[Feature],
     *,
     line_str: str = "",
-) -> List[Line]:
+) -> list[Line]:
     if not line_str:
         line_str = line_to_string(line)
-    result: List[Line] = []
+    result: list[Line] = []
     for transformed_line in transform(line, features, mode):
         if str(transformed_line).strip("\n") == line_str:
             raise CannotTransform("Line transformer returned an unchanged result")
src/black/lines.py
@@ -1,18 +1,7 @@
 import itertools
 import math
 from dataclasses import dataclass, field
-from typing import (
-    Callable,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Callable, Iterator, Optional, Sequence, TypeVar, Union, cast

 from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
 from black.mode import Mode, Preview
@@ -52,9 +41,9 @@ class Line:

     mode: Mode = field(repr=False)
     depth: int = 0
-    leaves: List[Leaf] = field(default_factory=list)
+    leaves: list[Leaf] = field(default_factory=list)
     # keys ordered like `leaves`
-    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
+    comments: dict[LeafID, list[Leaf]] = field(default_factory=dict)
     bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
     inside_brackets: bool = False
     should_split_rhs: bool = False
@@ -426,7 +415,7 @@ def append_comment(self, comment: Leaf) -> bool:
         self.comments.setdefault(id(last_leaf), []).append(comment)
         return True

-    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+    def comments_after(self, leaf: Leaf) -> list[Leaf]:
         """Generate comments that should appear directly after `leaf`."""
         return self.comments.get(id(leaf), [])

@@ -459,13 +448,13 @@ def is_complex_subscript(self, leaf: Leaf) -> bool:

     def enumerate_with_length(
         self, is_reversed: bool = False
-    ) -> Iterator[Tuple[Index, Leaf, int]]:
+    ) -> Iterator[tuple[Index, Leaf, int]]:
         """Return an enumeration of leaves with their length.

         Stops prematurely on multiline strings and standalone comments.
         """
         op = cast(
-            Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+            Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
             enumerate_reversed if is_reversed else enumerate,
         )
         for index, leaf in op(self.leaves):
@@ -531,11 +520,11 @@ class LinesBlock:
     previous_block: Optional["LinesBlock"]
     original_line: Line
     before: int = 0
-    content_lines: List[str] = field(default_factory=list)
+    content_lines: list[str] = field(default_factory=list)
     after: int = 0
     form_feed: bool = False

-    def all_lines(self) -> List[str]:
+    def all_lines(self) -> list[str]:
         empty_line = str(Line(mode=self.mode))
         prefix = make_simple_prefix(self.before, self.form_feed, empty_line)
         return [prefix] + self.content_lines + [empty_line * self.after]
@@ -554,7 +543,7 @@ class EmptyLineTracker:
     mode: Mode
     previous_line: Optional[Line] = None
     previous_block: Optional[LinesBlock] = None
-    previous_defs: List[Line] = field(default_factory=list)
+    previous_defs: list[Line] = field(default_factory=list)
     semantic_leading_comment: Optional[LinesBlock] = None

     def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
@@ -607,7 +596,7 @@ def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
         self.previous_block = block
         return block

-    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C901
+    def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]:  # noqa: C901
         max_allowed = 1
         if current_line.depth == 0:
             max_allowed = 1 if self.mode.is_pyi else 2
@@ -693,7 +682,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C9

     def _maybe_empty_lines_for_class_or_def(  # noqa: C901
         self, current_line: Line, before: int, user_had_newline: bool
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
         assert self.previous_line is not None

         if self.previous_line.is_decorator:
@@ -772,7 +761,7 @@ def _maybe_empty_lines_for_class_or_def(  # noqa: C901
         return newlines, 0


-def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
+def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
     """Like `reversed(enumerate(sequence))` if that were possible."""
     index = len(sequence) - 1
     for element in reversed(sequence):
@@ -781,7 +770,7 @@ def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:


 def append_leaves(
-    new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
+    new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
 ) -> None:
     """
     Append leaves (taken from @old_line) to @new_line, making sure to fix the
@@ -838,10 +827,10 @@ def is_line_short_enough(  # noqa: C901
     # Depth (which is based on the existing bracket_depth concept)
     # is needed to determine nesting level of the MLS.
     # Includes special case for trailing commas.
-    commas: List[int] = []  # tracks number of commas per depth level
+    commas: list[int] = []  # tracks number of commas per depth level
     multiline_string: Optional[Leaf] = None
     # store the leaves that contain parts of the MLS
-    multiline_string_contexts: List[LN] = []
+    multiline_string_contexts: list[LN] = []

     max_level_to_update: Union[int, float] = math.inf  # track the depth of the MLS
     for i, leaf in enumerate(line.leaves):
@@ -865,7 +854,7 @@ def is_line_short_enough(  # noqa: C901
         if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
             # Inside brackets, ignore trailing comma
             # directly after MLS/MLS-containing expression
-            ignore_ctxs: List[Optional[LN]] = [None]
+            ignore_ctxs: list[Optional[LN]] = [None]
             ignore_ctxs += multiline_string_contexts
             if (line.inside_brackets or leaf.bracket_depth > 0) and (
                 i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs
|
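Aside on the Tuple -> tuple rewrites above: function annotations are evaluated when the def statement executes, and subscripting the builtin containers is a PEP 585 feature that only exists at runtime on Python 3.9+. A minimal sketch (the bounds function is made up for illustration, not part of this commit):

def bounds(values: list[int]) -> tuple[int, int]:
    # On 3.9+ both subscriptions evaluate to types.GenericAlias objects;
    # on 3.8 the def statement itself raises TypeError.
    return min(values), max(values)

print(bounds.__annotations__)
# {'values': list[int], 'return': tuple[int, int]}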
@@ -8,7 +8,7 @@
 from enum import Enum, auto
 from hashlib import sha256
 from operator import attrgetter
-from typing import Dict, Final, Set
+from typing import Final

 from black.const import DEFAULT_LINE_LENGTH

@@ -64,7 +64,7 @@ class Feature(Enum):
 }


-VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
+VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
     TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
     TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
     TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
@@ -189,7 +189,7 @@ class Feature(Enum):
 }


-def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
+def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
     return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)


@@ -213,7 +213,7 @@ class Preview(Enum):
     pep646_typed_star_arg_type_var_tuple = auto()


-UNSTABLE_FEATURES: Set[Preview] = {
+UNSTABLE_FEATURES: set[Preview] = {
     # Many issues, see summary in https://github.com/psf/black/issues/4042
     Preview.string_processing,
     # See issues #3452 and #4158
@@ -234,17 +234,17 @@ class Deprecated(UserWarning):

 @dataclass
 class Mode:
-    target_versions: Set[TargetVersion] = field(default_factory=set)
+    target_versions: set[TargetVersion] = field(default_factory=set)
     line_length: int = DEFAULT_LINE_LENGTH
     string_normalization: bool = True
     is_pyi: bool = False
     is_ipynb: bool = False
     skip_source_first_line: bool = False
     magic_trailing_comma: bool = True
-    python_cell_magics: Set[str] = field(default_factory=set)
+    python_cell_magics: set[str] = field(default_factory=set)
     preview: bool = False
     unstable: bool = False
-    enabled_features: Set[Preview] = field(default_factory=set)
+    enabled_features: set[Preview] = field(default_factory=set)

     def __contains__(self, feature: Preview) -> bool:
         """
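The same constraint applies to annotated module-level constants such as VERSION_TO_FEATURES: the annotation expression runs eagerly at import time, so after this change importing the module is itself a Python 3.9+ check. A small self-contained sketch with a stand-in enum (FEATURES_BY_NAME is a hypothetical name):

from enum import Enum, auto

class Feature(Enum):  # stand-in for the real, much larger enum
    ASYNC_IDENTIFIERS = auto()

# dict[...] / set[...] here are evaluated the moment the module loads.
FEATURES_BY_NAME: dict[str, set[Feature]] = {
    "py33": {Feature.ASYNC_IDENTIFIERS},
}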
@@ -3,18 +3,7 @@
 """

 import sys
-from typing import (
-    Final,
-    Generic,
-    Iterator,
-    List,
-    Literal,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import Final, Generic, Iterator, Literal, Optional, TypeVar, Union

 if sys.version_info >= (3, 10):
     from typing import TypeGuard
@@ -462,7 +451,7 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
     return None


-def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
+def prev_siblings_are(node: Optional[LN], tokens: list[Optional[NodeType]]) -> bool:
     """Return if the `node` and its previous siblings match types against the provided
     list of tokens; the provided `node`has its type matched against the last element in
     the list. `None` can be used as the first element to declare that the start of the
@@ -634,8 +623,8 @@ def is_tuple_containing_walrus(node: LN) -> bool:
 def is_one_sequence_between(
     opening: Leaf,
     closing: Leaf,
-    leaves: List[Leaf],
-    brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
+    leaves: list[Leaf],
+    brackets: tuple[int, int] = (token.LPAR, token.RPAR),
 ) -> bool:
     """Return True if content between `opening` and `closing` is a one-sequence."""
     if (opening.type, closing.type) != brackets:
@@ -745,7 +734,7 @@ def is_yield(node: LN) -> bool:
     return False


-def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
+def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
     """Return True if `leaf` is a star or double star in a vararg or kwarg.

     If `within` includes VARARGS_PARENTS, this applies to function signatures.
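Note which names survive the collapsed typing import above: PEP 585 only covers the generic containers, so Optional, Union, Final, Literal, and TypeVar still have to come from typing on a 3.9 baseline; the `X | None` spelling that would replace Optional is PEP 604 and needs Python 3.10 when evaluated at runtime. A hedged sketch of that boundary:

from typing import Optional, Union

# Fine on 3.9: the builtin containers are generic (PEP 585).
names: dict[str, list[int]] = {}

# Still needs typing on 3.9: `int | None` (PEP 604) is a 3.10 feature
# when the annotation is evaluated at runtime.
maybe_port: Optional[int] = None
number: Union[int, float] = 0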
@@ -6,7 +6,7 @@
 import json
 import re
 import tempfile
-from typing import Any, List, Optional
+from typing import Any, Optional

 from click import echo, style
 from mypy_extensions import mypyc_attr
@@ -59,7 +59,7 @@ def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
 _line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")


-def _splitlines_no_ff(source: str) -> List[str]:
+def _splitlines_no_ff(source: str) -> list[str]:
     """Split a string into lines ignoring form feed and other chars.

     This mimics how the Python parser splits source code.
@@ -5,7 +5,7 @@
 import ast
 import sys
 import warnings
-from typing import Collection, Iterator, List, Set, Tuple
+from typing import Collection, Iterator

 from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
 from black.nodes import syms
@@ -21,7 +21,7 @@ class InvalidInput(ValueError):
     """Raised when input source code fails all parse attempts."""


-def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
+def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
     if not target_versions:
         # No target_version specified, so try all grammars.
         return [
@@ -123,7 +123,7 @@ class ASTSafetyError(Exception):


 def _parse_single_version(
-    src: str, version: Tuple[int, int], *, type_comments: bool
+    src: str, version: tuple[int, int], *, type_comments: bool
 ) -> ast.AST:
     filename = "<unknown>"
     with warnings.catch_warnings():
@@ -159,7 +159,7 @@ def parse_ast(src: str) -> ast.AST:
 def _normalize(lineend: str, value: str) -> str:
     # To normalize, we strip any leading and trailing space from
     # each line...
-    stripped: List[str] = [i.strip() for i in value.splitlines()]
+    stripped: list[str] = [i.strip() for i in value.splitlines()]
     normalized = lineend.join(stripped)
     # ...and remove any blank lines at the beginning and end of
     # the whole string
@@ -172,14 +172,14 @@ def stringify_ast(node: ast.AST) -> Iterator[str]:


 def _stringify_ast_with_new_parent(
-    node: ast.AST, parent_stack: List[ast.AST], new_parent: ast.AST
+    node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
 ) -> Iterator[str]:
     parent_stack.append(new_parent)
     yield from _stringify_ast(node, parent_stack)
     parent_stack.pop()


-def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
+def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
     if (
         isinstance(node, ast.Constant)
         and isinstance(node.value, str)
@@ -2,7 +2,7 @@

 import difflib
 from dataclasses import dataclass
-from typing import Collection, Iterator, List, Sequence, Set, Tuple, Union
+from typing import Collection, Iterator, Sequence, Union

 from black.nodes import (
     LN,
@@ -18,8 +18,8 @@
 from blib2to3.pgen2.token import ASYNC, NEWLINE


-def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
-    lines: List[Tuple[int, int]] = []
+def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
+    lines: list[tuple[int, int]] = []
     for lines_str in line_ranges:
         parts = lines_str.split("-")
         if len(parts) != 2:
@@ -40,14 +40,14 @@ def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
     return lines


-def is_valid_line_range(lines: Tuple[int, int]) -> bool:
+def is_valid_line_range(lines: tuple[int, int]) -> bool:
     """Returns whether the line range is valid."""
     return not lines or lines[0] <= lines[1]


 def sanitized_lines(
-    lines: Collection[Tuple[int, int]], src_contents: str
-) -> Collection[Tuple[int, int]]:
+    lines: Collection[tuple[int, int]], src_contents: str
+) -> Collection[tuple[int, int]]:
     """Returns the valid line ranges for the given source.

     This removes ranges that are entirely outside the valid lines.
@@ -74,10 +74,10 @@ def sanitized_lines(


 def adjusted_lines(
-    lines: Collection[Tuple[int, int]],
+    lines: Collection[tuple[int, int]],
     original_source: str,
     modified_source: str,
-) -> List[Tuple[int, int]]:
+) -> list[tuple[int, int]]:
     """Returns the adjusted line ranges based on edits from the original code.

     This computes the new line ranges by diffing original_source and
@@ -153,7 +153,7 @@ def adjusted_lines(
     return new_lines


-def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) -> None:
+def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
     """Converts unchanged lines to STANDALONE_COMMENT.

     The idea is similar to how `# fmt: on/off` is implemented. It also converts the
@@ -177,7 +177,7 @@ def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]])
     more formatting to pass (1). However, it's hard to get it correct when
     incorrect indentations are used. So we defer this to future optimizations.
     """
-    lines_set: Set[int] = set()
+    lines_set: set[int] = set()
     for start, end in lines:
         lines_set.update(range(start, end + 1))
     visitor = _TopLevelStatementsVisitor(lines_set)
@@ -205,7 +205,7 @@ class _TopLevelStatementsVisitor(Visitor[None]):
     classes/functions/statements.
     """

-    def __init__(self, lines_set: Set[int]):
+    def __init__(self, lines_set: set[int]):
         self._lines_set = lines_set

     def visit_simple_stmt(self, node: Node) -> Iterator[None]:
@@ -249,7 +249,7 @@ def visit_suite(self, node: Node) -> Iterator[None]:
             _convert_node_to_standalone_comment(semantic_parent)


-def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
+def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
     """Converts unchanged to STANDALONE_COMMENT line by line."""
     for leaf in node.leaves():
         if leaf.type != NEWLINE:
@@ -261,7 +261,7 @@ def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
             # match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
             # Here we need to check `subject_expr`. The `case_block+` will be
             # checked by their own NEWLINEs.
-            nodes_to_ignore: List[LN] = []
+            nodes_to_ignore: list[LN] = []
             prev_sibling = leaf.prev_sibling
             while prev_sibling:
                 nodes_to_ignore.insert(0, prev_sibling)
@@ -382,7 +382,7 @@ def _leaf_line_end(leaf: Leaf) -> int:
     return leaf.lineno + str(leaf).count("\n")


-def _get_line_range(node_or_nodes: Union[LN, List[LN]]) -> Set[int]:
+def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]:
     """Returns the line range of this node or list of nodes."""
     if isinstance(node_or_nodes, list):
         nodes = node_or_nodes
@@ -463,7 +463,7 @@ def _calculate_lines_mappings(
         modified_source.splitlines(keepends=True),
     )
     matching_blocks = matcher.get_matching_blocks()
-    lines_mappings: List[_LinesMapping] = []
+    lines_mappings: list[_LinesMapping] = []
     # matching_blocks is a sequence of "same block of code ranges", see
     # https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
     # Each block corresponds to a _LinesMapping with is_changed_block=False,
@@ -5,7 +5,7 @@
 import re
 import sys
 from functools import lru_cache
-from typing import Final, List, Match, Pattern, Tuple
+from typing import Final, Match, Pattern

 from black._width_table import WIDTH_TABLE
 from blib2to3.pytree import Leaf
@@ -43,7 +43,7 @@ def has_triple_quotes(string: str) -> bool:
     return raw_string[:3] in {'"""', "'''"}


-def lines_with_leading_tabs_expanded(s: str) -> List[str]:
+def lines_with_leading_tabs_expanded(s: str) -> list[str]:
     """
     Splits string into lines and expands only leading tabs (following the normal
     Python rules)
@@ -242,9 +242,9 @@ def normalize_string_quotes(s: str) -> str:

 def normalize_fstring_quotes(
     quote: str,
-    middles: List[Leaf],
+    middles: list[Leaf],
     is_raw_fstring: bool,
-) -> Tuple[List[Leaf], str]:
+) -> tuple[list[Leaf], str]:
     """Prefer double quotes but only if it doesn't cause more escaping.

     Adds or removes backslashes as appropriate.
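Pattern and Match are left untouched in this import, presumably because they have no builtin equivalent: their modern replacements live in the re module, a different rewrite than the builtin-generics pass applied here. A sketch of that alternative spelling, assuming Python 3.9+ (WORD and first_word are illustrative names):

import re
from typing import Optional

# re.Pattern / re.Match are subscriptable from Python 3.9 onward.
WORD: re.Pattern[str] = re.compile(r"\w+")

def first_word(text: str) -> Optional[re.Match[str]]:
    return WORD.search(text)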
@@ -11,16 +11,12 @@
     Callable,
     ClassVar,
     Collection,
-    Dict,
     Final,
     Iterable,
     Iterator,
-    List,
     Literal,
     Optional,
     Sequence,
-    Set,
-    Tuple,
     TypeVar,
     Union,
 )
@@ -68,7 +64,7 @@ class CannotTransform(Exception):
 ParserState = int
 StringID = int
 TResult = Result[T, CannotTransform]  # (T)ransform Result
-TMatchResult = TResult[List[Index]]
+TMatchResult = TResult[list[Index]]

 SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"])  # East Asian stops

@@ -179,7 +175,7 @@ def original_is_simple_lookup_func(
     return True


-def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) -> bool:
+def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
     """
     Handling the determination of is_simple_lookup for the lines prior to the doublestar
     token. This is required because of the need to isolate the chained expression
@@ -202,7 +198,7 @@ def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int])


 def handle_is_simple_lookup_forward(
-    line: Line, index: int, disallowed: Set[int]
+    line: Line, index: int, disallowed: set[int]
 ) -> bool:
     """
     Handling decision is_simple_lookup for the lines behind the doublestar token.
@@ -227,7 +223,7 @@ def handle_is_simple_lookup_forward(
     return True


-def is_expression_chained(chained_leaves: List[Leaf]) -> bool:
+def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
     """
     Function to determine if the variable is a chained call.
     (e.g., foo.lookup, foo().lookup, (foo.lookup())) will be recognized as chained call)
@@ -298,7 +294,7 @@ def do_match(self, line: Line) -> TMatchResult:

     @abstractmethod
     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         """
         Yields:
@@ -388,8 +384,8 @@ class CustomSplitMapMixin:
     the resultant substrings go over the configured max line length.
     """

-    _Key: ClassVar = Tuple[StringID, str]
-    _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
+    _Key: ClassVar = tuple[StringID, str]
+    _CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict(
         tuple
     )

@@ -413,7 +409,7 @@ def add_custom_splits(
         key = self._get_key(string)
         self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

-    def pop_custom_splits(self, string: str) -> List[CustomSplit]:
+    def pop_custom_splits(self, string: str) -> list[CustomSplit]:
         """Custom Split Map Getter Method

         Returns:
@@ -512,7 +508,7 @@ def do_match(self, line: Line) -> TMatchResult:
             return TErr("This line has no strings that need merging.")

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         new_line = line

@@ -543,7 +539,7 @@ def do_transform(

     @staticmethod
     def _remove_backslash_line_continuation_chars(
-        line: Line, string_indices: List[int]
+        line: Line, string_indices: list[int]
     ) -> TResult[Line]:
         """
         Merge strings that were split across multiple lines using
@@ -584,7 +580,7 @@ def _remove_backslash_line_continuation_chars(
         return Ok(new_line)

     def _merge_string_group(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> TResult[Line]:
         """
         Merges string groups (i.e. set of adjacent strings).
@@ -603,7 +599,7 @@ def _merge_string_group(
         is_valid_index = is_valid_index_factory(LL)

         # A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
-        merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {}
+        merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
         for string_idx in string_indices:
             vresult = self._validate_msg(line, string_idx)
             if isinstance(vresult, Err):
@@ -639,8 +635,8 @@ def _merge_string_group(
         return Ok(new_line)

     def _merge_one_string_group(
-        self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
-    ) -> Tuple[int, Leaf]:
+        self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
+    ) -> tuple[int, Leaf]:
         """
         Merges one string group where the first string in the group is
         `LL[string_idx]`.
@@ -1004,11 +1000,11 @@ def do_match(self, line: Line) -> TMatchResult:
         return TErr("This line has no strings wrapped in parens.")

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves

-        string_and_rpar_indices: List[int] = []
+        string_and_rpar_indices: list[int] = []
         for string_idx in string_indices:
             string_parser = StringParser()
             rpar_idx = string_parser.parse(LL, string_idx)
@@ -1031,7 +1027,7 @@ def do_transform(
         )

     def _transform_to_new_line(
-        self, line: Line, string_and_rpar_indices: List[int]
+        self, line: Line, string_and_rpar_indices: list[int]
     ) -> Line:
         LL = line.leaves

@@ -1284,7 +1280,7 @@ def _get_max_string_length(self, line: Line, string_idx: int) -> int:
         return max_string_length

     @staticmethod
-    def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
+    def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -1329,14 +1325,14 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
     return None


-def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
+def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
     """
     Yields spans corresponding to expressions in a given f-string.
     Spans are half-open ranges (left inclusive, right exclusive).
     Assumes the input string is a valid f-string, but will not crash if the input
     string is invalid.
     """
-    stack: List[int] = []  # our curly paren stack
+    stack: list[int] = []  # our curly paren stack
     i = 0
     while i < len(s):
         if s[i] == "{":
@@ -1499,7 +1495,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
         return Ok([string_idx])

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves
         assert len(string_indices) == 1, (
@@ -1601,7 +1597,7 @@ def more_splits_should_be_made() -> bool:
             else:
                 return str_width(rest_value) > max_last_string_column()

-        string_line_results: List[Ok[Line]] = []
+        string_line_results: list[Ok[Line]] = []
         while more_splits_should_be_made():
             if use_custom_breakpoints:
                 # Custom User Split (manual)
@@ -1730,7 +1726,7 @@ def more_splits_should_be_made() -> bool:
         last_line.comments = line.comments.copy()
         yield Ok(last_line)

-    def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+    def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
         """
         Yields:
             All ranges of @string which, if @string were to be split there,
@@ -1761,7 +1757,7 @@ def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
                 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
             yield begin, end

-    def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+    def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
         """
         Yields:
             All ranges of @string which, if @string were to be split there,
@@ -1772,8 +1768,8 @@ def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
             return
         yield from iter_fexpr_spans(string)

-    def _get_illegal_split_indices(self, string: str) -> Set[Index]:
-        illegal_indices: Set[Index] = set()
+    def _get_illegal_split_indices(self, string: str) -> set[Index]:
+        illegal_indices: set[Index] = set()
         iterators = [
             self._iter_fexpr_slices(string),
             self._iter_nameescape_slices(string),
@@ -1899,7 +1895,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str:
         else:
             return string

-    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
+    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
         LL = list(leaves)

         string_op_leaves = []
@@ -2008,7 +2004,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
         return TErr("This line does not contain any non-atomic strings.")

     @staticmethod
-    def _return_match(LL: List[Leaf]) -> Optional[int]:
+    def _return_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2033,7 +2029,7 @@ def _return_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _else_match(LL: List[Leaf]) -> Optional[int]:
+    def _else_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2060,7 +2056,7 @@ def _else_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _assert_match(LL: List[Leaf]) -> Optional[int]:
+    def _assert_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2095,7 +2091,7 @@ def _assert_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _assign_match(LL: List[Leaf]) -> Optional[int]:
+    def _assign_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2142,7 +2138,7 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
+    def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2181,7 +2177,7 @@ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
         return None

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves
         assert len(string_indices) == 1, (
@@ -2347,7 +2343,7 @@ class StringParser:
     DONE: Final = 8

     # Lookup Table for Next State
-    _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
+    _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = {
         # A string trailer may start with '.' OR '%'.
         (START, token.DOT): DOT,
         (START, token.PERCENT): PERCENT,
@@ -2376,7 +2372,7 @@ def __init__(self) -> None:
         self._state = self.START
         self._unmatched_lpars = 0

-    def parse(self, leaves: List[Leaf], string_idx: int) -> int:
+    def parse(self, leaves: list[Leaf], string_idx: int) -> int:
         """
         Pre-conditions:
             * @leaves[@string_idx].type == token.STRING
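As the _Key and _goto hunks above show, typing's special forms compose with the builtin generics: a subscripted builtin is an ordinary runtime object, so it can be assigned to a name as an alias or wrapped in ClassVar/Final. A small sketch with made-up names:

from typing import ClassVar, Final

Key = tuple[int, str]  # plain assignment; a types.GenericAlias at runtime

class Cache:  # hypothetical class for illustration
    limit: Final[int] = 128
    # typing's special forms wrap the builtin generic unchanged.
    _table: ClassVar[dict[Key, list[str]]] = {}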
@@ -4,7 +4,6 @@
 from datetime import datetime, timezone
 from functools import partial
 from multiprocessing import freeze_support
-from typing import Set, Tuple

 try:
     from aiohttp import web
@@ -191,7 +190,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:

     preview = bool(headers.get(PREVIEW, False))
     unstable = bool(headers.get(UNSTABLE, False))
-    enable_features: Set[black.Preview] = set()
+    enable_features: set[black.Preview] = set()
     enable_unstable_features = headers.get(ENABLE_UNSTABLE_FEATURE, "").split(",")
     for piece in enable_unstable_features:
         piece = piece.strip()
@@ -216,7 +215,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:
     )


-def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
+def parse_python_variant_header(value: str) -> tuple[bool, set[black.TargetVersion]]:
     if value == "pyi":
         return True, set()
     else:
@@ -24,7 +24,7 @@
 from contextlib import contextmanager
 from dataclasses import dataclass, field
 from logging import Logger
-from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
+from typing import IO, Any, Iterable, Iterator, Optional, Union, cast

 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.tokenize import GoodTokenInfo
@@ -40,7 +40,7 @@
 class ReleaseRange:
     start: int
     end: Optional[int] = None
-    tokens: List[Any] = field(default_factory=list)
+    tokens: list[Any] = field(default_factory=list)

     def lock(self) -> None:
         total_eaten = len(self.tokens)
@@ -51,7 +51,7 @@ class TokenProxy:
     def __init__(self, generator: Any) -> None:
         self._tokens = generator
         self._counter = 0
-        self._release_ranges: List[ReleaseRange] = []
+        self._release_ranges: list[ReleaseRange] = []

     @contextmanager
     def release(self) -> Iterator["TokenProxy"]:
@@ -121,7 +121,7 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->

         lineno = 1
         column = 0
-        indent_columns: List[int] = []
+        indent_columns: list[int] = []
         type = value = start = end = line_text = None
         prefix = ""

@@ -202,8 +202,8 @@ def parse_string(self, text: str, debug: bool = False) -> NL:
         )
         return self.parse_tokens(tokens, debug)

-    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
-        lines: List[str] = []
+    def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
+        lines: list[str] = []
         current_line = ""
         current_column = 0
         wait_for_nl = False
@@ -16,15 +16,15 @@
 import os
 import pickle
 import tempfile
-from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
+from typing import Any, Optional, TypeVar, Union

 # Local imports
 from . import token

 _P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[str]]
-DFA = List[List[Tuple[int, int]]]
-DFAS = Tuple[DFA, Dict[int, int]]
+Label = tuple[int, Optional[str]]
+DFA = list[list[tuple[int, int]]]
+DFAS = tuple[DFA, dict[int, int]]
 Path = Union[str, "os.PathLike[str]"]


@@ -83,16 +83,16 @@ class Grammar:
     """

     def __init__(self) -> None:
-        self.symbol2number: Dict[str, int] = {}
-        self.number2symbol: Dict[int, str] = {}
-        self.states: List[DFA] = []
-        self.dfas: Dict[int, DFAS] = {}
-        self.labels: List[Label] = [(0, "EMPTY")]
-        self.keywords: Dict[str, int] = {}
-        self.soft_keywords: Dict[str, int] = {}
-        self.tokens: Dict[int, int] = {}
-        self.symbol2label: Dict[str, int] = {}
-        self.version: Tuple[int, int] = (0, 0)
+        self.symbol2number: dict[str, int] = {}
+        self.number2symbol: dict[int, str] = {}
+        self.states: list[DFA] = []
+        self.dfas: dict[int, DFAS] = {}
+        self.labels: list[Label] = [(0, "EMPTY")]
+        self.keywords: dict[str, int] = {}
+        self.soft_keywords: dict[str, int] = {}
+        self.tokens: dict[int, int] = {}
+        self.symbol2label: dict[str, int] = {}
+        self.version: tuple[int, int] = (0, 0)
         self.start = 256
         # Python 3.7+ parses async as a keyword, not an identifier
         self.async_keywords = False
@@ -114,7 +114,7 @@ def dump(self, filename: Path) -> None:
             pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
         os.replace(f.name, filename)

-    def _update(self, attrs: Dict[str, Any]) -> None:
+    def _update(self, attrs: dict[str, Any]) -> None:
         for k, v in attrs.items():
             setattr(self, k, v)
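The Label/DFA/DFAS aliases above are plain assignments, so after this change they hold types.GenericAlias objects rather than typing objects; subscription and nesting compose exactly as before. A quick check, runnable on 3.9+:

import types

Label = tuple[int, str]
DFA = list[list[Label]]

assert isinstance(DFA, types.GenericAlias)
print(DFA)  # list[list[tuple[int, str]]]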
@@ -4,9 +4,9 @@
 """Safely evaluate Python string literals without using eval()."""

 import re
-from typing import Dict, Match
+from typing import Match

-simple_escapes: Dict[str, str] = {
+simple_escapes: dict[str, str] = {
     "a": "\a",
     "b": "\b",
     "f": "\f",
@@ -10,19 +10,7 @@

 """
 from contextlib import contextmanager
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Union, cast

 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
@@ -34,10 +22,10 @@
     from blib2to3.pgen2.driver import TokenProxy


-Results = Dict[str, NL]
+Results = dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
-DFA = List[List[Tuple[int, int]]]
-DFAS = Tuple[DFA, Dict[int, int]]
+DFA = list[list[tuple[int, int]]]
+DFAS = tuple[DFA, dict[int, int]]


 def lam_sub(grammar: Grammar, node: RawNode) -> NL:
@@ -50,24 +38,24 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:


 def stack_copy(
-    stack: List[Tuple[DFAS, int, RawNode]],
-) -> List[Tuple[DFAS, int, RawNode]]:
+    stack: list[tuple[DFAS, int, RawNode]],
+) -> list[tuple[DFAS, int, RawNode]]:
     """Nodeless stack copy."""
     return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]


 class Recorder:
-    def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
+    def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
         self.parser = parser
         self._ilabels = ilabels
         self.context = context  # not really matter

-        self._dead_ilabels: Set[int] = set()
+        self._dead_ilabels: set[int] = set()
         self._start_point = self.parser.stack
         self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}

     @property
-    def ilabels(self) -> Set[int]:
+    def ilabels(self) -> set[int]:
         return self._dead_ilabels.symmetric_difference(self._ilabels)

     @contextmanager
@@ -233,9 +221,9 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         # where children is a list of nodes or None, and context may be None.
         newnode: RawNode = (start, None, None, [])
         stackentry = (self.grammar.dfas[start], 0, newnode)
-        self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
+        self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
         self.rootnode: Optional[NL] = None
-        self.used_names: Set[str] = set()
+        self.used_names: set[str] = set()
         self.proxy = proxy
         self.last_token = None

@@ -333,7 +321,7 @@ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> boo
         # No success finding a transition
         raise ParseError("bad input", type, value, context)

-    def classify(self, type: int, value: str, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> list[int]:
         """Turn a token into a label. (Internal)

         Depending on whether the value is a soft-keyword or not,
@ -2,18 +2,7 @@
|
|||||||
# Licensed to PSF under a Contributor Agreement.
|
# Licensed to PSF under a Contributor Agreement.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
from typing import (
|
from typing import IO, Any, Iterator, NoReturn, Optional, Sequence, Union
|
||||||
IO,
|
|
||||||
Any,
|
|
||||||
Dict,
|
|
||||||
Iterator,
|
|
||||||
List,
|
|
||||||
NoReturn,
|
|
||||||
Optional,
|
|
||||||
Sequence,
|
|
||||||
Tuple,
|
|
||||||
Union,
|
|
||||||
)
|
|
||||||
|
|
||||||
from blib2to3.pgen2 import grammar, token, tokenize
|
from blib2to3.pgen2 import grammar, token, tokenize
|
||||||
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
||||||
@ -29,7 +18,7 @@ class ParserGenerator:
|
|||||||
filename: Path
|
filename: Path
|
||||||
stream: IO[str]
|
stream: IO[str]
|
||||||
generator: Iterator[GoodTokenInfo]
|
generator: Iterator[GoodTokenInfo]
|
||||||
first: Dict[str, Optional[Dict[str, int]]]
|
first: dict[str, Optional[dict[str, int]]]
|
||||||
|
|
||||||
def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
|
def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
|
||||||
close_stream = None
|
close_stream = None
|
||||||
@ -71,7 +60,7 @@ def make_grammar(self) -> PgenGrammar:
|
|||||||
c.start = c.symbol2number[self.startsymbol]
|
c.start = c.symbol2number[self.startsymbol]
|
||||||
return c
|
return c
|
||||||
|
|
||||||
def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
|
def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]:
|
||||||
rawfirst = self.first[name]
|
rawfirst = self.first[name]
|
||||||
assert rawfirst is not None
|
assert rawfirst is not None
|
||||||
first = {}
|
first = {}
|
||||||
@ -144,7 +133,7 @@ def calcfirst(self, name: str) -> None:
|
|||||||
dfa = self.dfas[name]
|
dfa = self.dfas[name]
|
||||||
self.first[name] = None # dummy to detect left recursion
|
self.first[name] = None # dummy to detect left recursion
|
||||||
state = dfa[0]
|
state = dfa[0]
|
||||||
totalset: Dict[str, int] = {}
|
totalset: dict[str, int] = {}
|
||||||
overlapcheck = {}
|
overlapcheck = {}
|
||||||
for label in state.arcs:
|
for label in state.arcs:
|
||||||
if label in self.dfas:
|
if label in self.dfas:
|
||||||
@ -161,7 +150,7 @@ def calcfirst(self, name: str) -> None:
|
|||||||
else:
|
else:
|
||||||
totalset[label] = 1
|
totalset[label] = 1
|
||||||
overlapcheck[label] = {label: 1}
|
overlapcheck[label] = {label: 1}
|
||||||
inverse: Dict[str, str] = {}
|
inverse: dict[str, str] = {}
|
||||||
for label, itsfirst in overlapcheck.items():
|
for label, itsfirst in overlapcheck.items():
|
||||||
for symbol in itsfirst:
|
for symbol in itsfirst:
|
||||||
if symbol in inverse:
|
if symbol in inverse:
|
||||||
@ -172,7 +161,7 @@ def calcfirst(self, name: str) -> None:
|
|||||||
inverse[symbol] = label
|
inverse[symbol] = label
|
||||||
self.first[name] = totalset
|
self.first[name] = totalset
|
||||||
|
|
||||||
def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
|
def parse(self) -> tuple[dict[str, list["DFAState"]], str]:
|
||||||
dfas = {}
|
dfas = {}
|
||||||
startsymbol: Optional[str] = None
|
startsymbol: Optional[str] = None
|
||||||
# MSTART: (NEWLINE | RULE)* ENDMARKER
|
# MSTART: (NEWLINE | RULE)* ENDMARKER
|
||||||
@ -197,7 +186,7 @@ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
|
|||||||
assert startsymbol is not None
|
assert startsymbol is not None
|
||||||
return dfas, startsymbol
|
return dfas, startsymbol
|
||||||
|
|
||||||
def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
|
def make_dfa(self, start: "NFAState", finish: "NFAState") -> list["DFAState"]:
|
||||||
# To turn an NFA into a DFA, we define the states of the DFA
|
# To turn an NFA into a DFA, we define the states of the DFA
|
||||||
# to correspond to *sets* of states of the NFA. Then do some
|
# to correspond to *sets* of states of the NFA. Then do some
|
||||||
# state reduction. Let's represent sets as dicts with 1 for
|
# state reduction. Let's represent sets as dicts with 1 for
|
||||||
@ -205,12 +194,12 @@ def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
|
|||||||
assert isinstance(start, NFAState)
|
assert isinstance(start, NFAState)
|
||||||
assert isinstance(finish, NFAState)
|
assert isinstance(finish, NFAState)
|
||||||
|
|
||||||
def closure(state: NFAState) -> Dict[NFAState, int]:
|
def closure(state: NFAState) -> dict[NFAState, int]:
|
||||||
base: Dict[NFAState, int] = {}
|
base: dict[NFAState, int] = {}
|
||||||
addclosure(state, base)
|
addclosure(state, base)
|
||||||
return base
|
return base
|
||||||
|
|
||||||
def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
|
def addclosure(state: NFAState, base: dict[NFAState, int]) -> None:
|
||||||
assert isinstance(state, NFAState)
|
assert isinstance(state, NFAState)
|
||||||
if state in base:
|
if state in base:
|
||||||
return
|
return
|
||||||
@ -221,7 +210,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
|
|||||||
|
|
||||||
         states = [DFAState(closure(start), finish)]
         for state in states:  # NB states grows while we're iterating
-            arcs: Dict[str, Dict[NFAState, int]] = {}
+            arcs: dict[str, dict[NFAState, int]] = {}
             for nfastate in state.nfaset:
                 for label, next in nfastate.arcs:
                     if label is not None:
@@ -259,7 +248,7 @@ def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
             for label, next in sorted(state.arcs.items()):
                 print("    %s -> %d" % (label, dfa.index(next)))

-    def simplify_dfa(self, dfa: List["DFAState"]) -> None:
+    def simplify_dfa(self, dfa: list["DFAState"]) -> None:
         # This is not theoretically optimal, but works well enough.
         # Algorithm: repeatedly look for two states that have the same
         # set of arcs (same labels pointing to the same nodes) and
@@ -280,7 +269,7 @@ def simplify_dfa(self, dfa: List["DFAState"]) -> None:
                         changes = True
                         break

-    def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_rhs(self) -> tuple["NFAState", "NFAState"]:
         # RHS: ALT ('|' ALT)*
         a, z = self.parse_alt()
         if self.value != "|":
@@ -297,7 +286,7 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
                 z.addarc(zz)
             return aa, zz

-    def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_alt(self) -> tuple["NFAState", "NFAState"]:
         # ALT: ITEM+
         a, b = self.parse_item()
         while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
@@ -306,7 +295,7 @@ def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
             b = d
         return a, b

-    def parse_item(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_item(self) -> tuple["NFAState", "NFAState"]:
         # ITEM: '[' RHS ']' | ATOM ['+' | '*']
         if self.value == "[":
             self.gettoken()
@@ -326,7 +315,7 @@ def parse_item(self) -> Tuple["NFAState", "NFAState"]:
         else:
             return a, a

-    def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_atom(self) -> tuple["NFAState", "NFAState"]:
         # ATOM: '(' RHS ')' | NAME | STRING
         if self.value == "(":
             self.gettoken()
@@ -371,7 +360,7 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn:


 class NFAState:
-    arcs: List[Tuple[Optional[str], "NFAState"]]
+    arcs: list[tuple[Optional[str], "NFAState"]]

     def __init__(self) -> None:
         self.arcs = []  # list of (label, NFAState) pairs
@@ -383,11 +372,11 @@ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:


 class DFAState:
-    nfaset: Dict[NFAState, Any]
+    nfaset: dict[NFAState, Any]
     isfinal: bool
-    arcs: Dict[str, "DFAState"]
+    arcs: dict[str, "DFAState"]

-    def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
+    def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None:
         assert isinstance(nfaset, dict)
         assert isinstance(next(iter(nfaset)), NFAState)
         assert isinstance(final, NFAState)
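Aside from the mechanical renames, nothing above changes at runtime: PEP 585 makes the builtin containers subscriptable, and they accept string forward references such as "NFAState" just as the typing aliases did. A minimal standalone sketch (illustrative only, not part of the commit) that runs on Python 3.9+:

from typing import Optional


class NFAState:
    # list[...] and tuple[...] build a types.GenericAlias when the class
    # body is evaluated; the "NFAState" forward reference is stored as a
    # string, so no `from __future__ import annotations` is needed.
    arcs: list[tuple[Optional[str], "NFAState"]]

    def __init__(self) -> None:
        self.arcs = []


print(type(NFAState.__annotations__["arcs"]))  # <class 'types.GenericAlias'>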
@@ -1,6 +1,6 @@
 """Token constants (from "token.h")."""

-from typing import Dict, Final
+from typing import Final

 # Taken from Python (r53757) and modified to include some tokens
 # originally monkeypatched in by pgen2.tokenize
@@ -74,7 +74,7 @@
 NT_OFFSET: Final = 256
 # --end constants--

-tok_name: Final[Dict[int, str]] = {}
+tok_name: Final[dict[int, str]] = {}
 for _name, _value in list(globals().items()):
     if type(_value) is int:
         tok_name[_value] = _name
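The tok_name reverse mapping above is small enough to exercise on its own. A hedged sketch (ENDMARKER and NAME carry their real values from the stdlib token module; everything else is pared down to what the loop actually does):

from typing import Final

ENDMARKER: Final = 0
NAME: Final = 1

tok_name: Final[dict[int, str]] = {}
for _name, _value in list(globals().items()):
    if type(_value) is int:
        tok_name[_value] = _name

print(tok_name)  # {0: 'ENDMARKER', 1: 'NAME'}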
@@ -29,18 +29,7 @@

 import builtins
 import sys
-from typing import (
-    Callable,
-    Final,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Set,
-    Tuple,
-    Union,
-)
+from typing import Callable, Final, Iterable, Iterator, Optional, Pattern, Union

 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.token import (
@@ -93,7 +82,7 @@ def maybe(*choices: str) -> str:
     return group(*choices) + "?"


-def _combinations(*l: str) -> Set[str]:
+def _combinations(*l: str) -> set[str]:
     return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}


@@ -248,7 +237,7 @@ class StopTokenizing(Exception):
     pass


-Coord = Tuple[int, int]
+Coord = tuple[int, int]


 def printtoken(
@@ -289,12 +278,12 @@ def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
         tokeneater(*token_info)


-GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
-TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
+GoodTokenInfo = tuple[int, str, Coord, Coord, str]
+TokenInfo = Union[tuple[int, str], GoodTokenInfo]


 class Untokenizer:
-    tokens: List[str]
+    tokens: list[str]
     prev_row: int
     prev_col: int

@@ -324,7 +313,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
                 self.prev_col = 0
         return "".join(self.tokens)

-    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -370,7 +359,7 @@ def _get_normal_name(orig_enc: str) -> str:
     return orig_enc


-def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
+def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, list[bytes]]:
     """
     The detect_encoding() function is used to detect the encoding that should
     be used to decode a Python source file. It requires one argument, readline,
@@ -471,7 +460,7 @@ def is_fstring_start(token: str) -> bool:
     return builtins.any(token.startswith(prefix) for prefix in fstring_prefix)


-def _split_fstring_start_and_middle(token: str) -> Tuple[str, str]:
+def _split_fstring_start_and_middle(token: str) -> tuple[str, str]:
     for prefix in fstring_prefix:
         _, prefix, rest = token.partition(prefix)
         if prefix != "":
@@ -525,7 +514,7 @@ class FStringState:
     """

     def __init__(self) -> None:
-        self.stack: List[int] = [STATE_NOT_FSTRING]
+        self.stack: list[int] = [STATE_NOT_FSTRING]

     def is_in_fstring_expression(self) -> bool:
         return self.stack[-1] not in (STATE_MIDDLE, STATE_NOT_FSTRING)
@@ -581,7 +570,7 @@ def generate_tokens(
     logical line; continuation lines are included.
     """
     lnum = parenlev = continued = 0
-    parenlev_stack: List[int] = []
+    parenlev_stack: list[int] = []
     fstring_state = FStringState()
     formatspec = ""
     numchars: Final[str] = "0123456789"
@@ -598,9 +587,9 @@ def generate_tokens(
     async_def_indent = 0
     async_def_nl = False

-    strstart: Tuple[int, int]
-    endprog_stack: List[Pattern[str]] = []
-    formatspec_start: Tuple[int, int]
+    strstart: tuple[int, int]
+    endprog_stack: list[Pattern[str]] = []
+    formatspec_start: tuple[int, int]

     while 1:  # loop over lines in stream
         try:
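Note that aliases like Coord and GoodTokenInfo are plain assignments evaluated at import time, so the builtin-generic spelling genuinely requires a Python 3.9 interpreter; `from __future__ import annotations` would only defer annotations, not these aliases. Union stays imported from typing because the `X | Y` syntax needs 3.10. A small usage sketch (printtoken_stub is a hypothetical stand-in, not the module's printtoken):

from typing import Union

Coord = tuple[int, int]
GoodTokenInfo = tuple[int, str, Coord, Coord, str]
TokenInfo = Union[tuple[int, str], GoodTokenInfo]


def printtoken_stub(tok: GoodTokenInfo) -> None:
    # Unpack (type, string, start, end, line), the 5-tuple shape
    # that generate_tokens yields.
    ttype, value, (srow, scol), (erow, ecol), _line = tok
    print("%d,%d-%d,%d:\t%d\t%r" % (srow, scol, erow, ecol, ttype, value))


printtoken_stub((1, "def", (1, 0), (1, 3), "def f():\n"))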
@@ -12,18 +12,7 @@

 # mypy: allow-untyped-defs, allow-incomplete-defs

-from typing import (
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import Any, Iterable, Iterator, Optional, TypeVar, Union

 from blib2to3.pgen2.grammar import Grammar

@@ -34,7 +23,7 @@

 HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max

-_type_reprs: Dict[int, Union[str, int]] = {}
+_type_reprs: dict[int, Union[str, int]] = {}


 def type_repr(type_num: int) -> Union[str, int]:
@@ -57,8 +46,8 @@ def type_repr(type_num: int) -> Union[str, int]:
 _P = TypeVar("_P", bound="Base")

 NL = Union["Node", "Leaf"]
-Context = Tuple[str, Tuple[int, int]]
-RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
+Context = tuple[str, tuple[int, int]]
+RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]


 class Base:
@@ -74,7 +63,7 @@ class Base:
     # Default values for instance variables
     type: int  # int: token number (< 256) or symbol number (>= 256)
     parent: Optional["Node"] = None  # Parent node pointer, or None
-    children: List[NL]  # List of subnodes
+    children: list[NL]  # List of subnodes
     was_changed: bool = False
     was_checked: bool = False

@@ -135,7 +124,7 @@ def pre_order(self) -> Iterator[NL]:
         """
         raise NotImplementedError

-    def replace(self, new: Union[NL, List[NL]]) -> None:
+    def replace(self, new: Union[NL, list[NL]]) -> None:
         """Replace this node with a new one in the parent."""
         assert self.parent is not None, str(self)
         assert new is not None
@@ -242,16 +231,16 @@ def get_suffix(self) -> str:
 class Node(Base):
     """Concrete implementation for interior nodes."""

-    fixers_applied: Optional[List[Any]]
-    used_names: Optional[Set[str]]
+    fixers_applied: Optional[list[Any]]
+    used_names: Optional[set[str]]

     def __init__(
         self,
         type: int,
-        children: List[NL],
+        children: list[NL],
         context: Optional[Any] = None,
         prefix: Optional[str] = None,
-        fixers_applied: Optional[List[Any]] = None,
+        fixers_applied: Optional[list[Any]] = None,
     ) -> None:
         """
         Initializer.
@@ -363,12 +352,12 @@ def append_child(self, child: NL) -> None:
         self.invalidate_sibling_maps()

     def invalidate_sibling_maps(self) -> None:
-        self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
-        self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
+        self.prev_sibling_map: Optional[dict[int, Optional[NL]]] = None
+        self.next_sibling_map: Optional[dict[int, Optional[NL]]] = None

     def update_sibling_maps(self) -> None:
-        _prev: Dict[int, Optional[NL]] = {}
-        _next: Dict[int, Optional[NL]] = {}
+        _prev: dict[int, Optional[NL]] = {}
+        _next: dict[int, Optional[NL]] = {}
         self.prev_sibling_map = _prev
         self.next_sibling_map = _next
         previous: Optional[NL] = None
@@ -384,11 +373,11 @@ class Leaf(Base):

     # Default values for instance variables
     value: str
-    fixers_applied: List[Any]
+    fixers_applied: list[Any]
     bracket_depth: int
     # Changed later in brackets.py
     opening_bracket: Optional["Leaf"] = None
-    used_names: Optional[Set[str]]
+    used_names: Optional[set[str]]
     _prefix = ""  # Whitespace and comments preceding this token in the input
     lineno: int = 0  # Line where this token starts in the input
     column: int = 0  # Column where this token starts in the input
@@ -403,7 +392,7 @@ def __init__(
         value: str,
         context: Optional[Context] = None,
         prefix: Optional[str] = None,
-        fixers_applied: List[Any] = [],
+        fixers_applied: list[Any] = [],
         opening_bracket: Optional["Leaf"] = None,
         fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
     ) -> None:
@@ -421,7 +410,7 @@ def __init__(
         self.value = value
         if prefix is not None:
             self._prefix = prefix
-        self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
+        self.fixers_applied: Optional[list[Any]] = fixers_applied[:]
         self.children = []
         self.opening_bracket = opening_bracket
         self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
@@ -503,7 +492,7 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
         return Leaf(type, value or "", context=context)


-_Results = Dict[str, NL]
+_Results = dict[str, NL]


 class BasePattern:
@@ -576,7 +565,7 @@ def match(self, node: NL, results: Optional[_Results] = None) -> bool:
             results[self.name] = node
         return True

-    def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
+    def match_seq(self, nodes: list[NL], results: Optional[_Results] = None) -> bool:
         """
         Does this pattern exactly match a sequence of nodes?

@@ -586,7 +575,7 @@ def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool
             return False
         return self.match(nodes[0], results)

-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding all matches for this pattern.

@@ -816,7 +805,7 @@ def match_seq(self, nodes, results=None) -> bool:
                 return True
         return False

-    def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding matches for a sequence of nodes.

@@ -861,7 +850,7 @@ def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
             if hasattr(sys, "getrefcount"):
                 sys.stderr = save_stderr

-    def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """Helper to iteratively yield the matches."""
         nodelen = len(nodes)
         if 0 >= self.min:
@@ -890,7 +879,7 @@ def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
                             new_results.append((c0 + c1, r))
             results = new_results

-    def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
+    def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
         """Special optimized matcher for bare_name."""
         count = 0
         r = {}  # type: _Results
@@ -907,7 +896,7 @@ def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
         r[self.name] = nodes[:count]
         return count, r

-    def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
+    def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
         """Helper to recursively yield the matches."""
         assert self.content is not None
         if count >= self.min:
@@ -944,7 +933,7 @@ def match_seq(self, nodes, results=None) -> bool:
         # We only match an empty sequence of nodes in its entirety
         return len(nodes) == 0

-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         if self.content is None:
             # Return a match if there is an empty sequence
             if len(nodes) == 0:
@@ -957,8 +946,8 @@ def generate_matches(


 def generate_matches(
-    patterns: List[BasePattern], nodes: List[NL]
-) -> Iterator[Tuple[int, _Results]]:
+    patterns: list[BasePattern], nodes: list[NL]
+) -> Iterator[tuple[int, _Results]]:
     """
     Generator yielding matches for a sequence of patterns and nodes.

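As a final illustration of the pattern applied throughout this file, a hedged sketch (simplified stand-ins, not the real Node/Leaf machinery) showing the builtin generics nesting inside the typing constructs that remain, such as Iterator:

from typing import Iterator

_Results = dict[str, str]  # simplified stand-in for dict[str, NL]


def generate_matches_demo(
    patterns: list[str], nodes: list[str]
) -> Iterator[tuple[int, _Results]]:
    # Yield (match length, captured results) pairs, mirroring the shape
    # of the generate_matches signatures above.
    for count, (pat, node) in enumerate(zip(patterns, nodes), start=1):
        if pat == node:
            yield count, {pat: node}


print(list(generate_matches_demo(["a", "b"], ["a", "x"])))  # [(1, {'a': 'a'})]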