Use builtin generics (#4458)
Generated with:
uvx ruff check --output-format concise src --target-version py39 --select UP006 --fix --unsafe-fixes
uvx ruff check --output-format concise src --target-version py39 --select F401 --fix
plus some manual fixups.
parent 2a45cecf29, commit 8fb2add1f7
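For context, Ruff's UP006 rule rewrites typing-module generic aliases (List, Dict, Set, Tuple, and so on) to the builtin generics standardized by PEP 585, which are valid in annotations on Python 3.9 and newer; the follow-up F401 pass then removes the typing imports left unused by that rewrite. A minimal before/after sketch of the kind of change applied throughout this diff (illustrative only; `find_sources` is a made-up function, not one taken from Black):

    # before
    from typing import Dict, List, Optional, Tuple

    def find_sources(paths: Tuple[str, ...]) -> Dict[str, List[int]]:
        cache: Optional[Dict[str, List[int]]] = None
        ...

    # after (Python 3.9+)
    from typing import Optional

    def find_sources(paths: tuple[str, ...]) -> dict[str, list[int]]:
        cache: Optional[dict[str, list[int]]] = None
        ...

Optional and Union are left alone throughout the commit; rewriting those to the X | Y syntax is a separate rule (UP007) that was not applied here.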
@@ -14,17 +14,13 @@
from typing import (
    Any,
    Collection,
    Dict,
    Generator,
    Iterator,
    List,
    MutableMapping,
    Optional,
    Pattern,
    Sequence,
    Set,
    Sized,
    Tuple,
    Union,
)
@@ -176,7 +172,7 @@ def read_pyproject_toml(
            "line-ranges", "Cannot use line-ranges in the pyproject.toml file."
        )

    default_map: Dict[str, Any] = {}
    default_map: dict[str, Any] = {}
    if ctx.default_map:
        default_map.update(ctx.default_map)
    default_map.update(config)
@@ -186,9 +182,9 @@ def read_pyproject_toml(


def spellcheck_pyproject_toml_keys(
    ctx: click.Context, config_keys: List[str], config_file_path: str
    ctx: click.Context, config_keys: list[str], config_file_path: str
) -> None:
    invalid_keys: List[str] = []
    invalid_keys: list[str] = []
    available_config_options = {param.name for param in ctx.command.params}
    for key in config_keys:
        if key not in available_config_options:
@@ -202,8 +198,8 @@ def spellcheck_pyproject_toml_keys(


def target_version_option_callback(
    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
) -> List[TargetVersion]:
    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
) -> list[TargetVersion]:
    """Compute the target versions from a --target-version flag.

    This is its own function because mypy couldn't infer the type correctly
@@ -213,8 +209,8 @@ def target_version_option_callback(


def enable_unstable_feature_callback(
    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
) -> List[Preview]:
    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
) -> list[Preview]:
    """Compute the features from an --enable-unstable-feature flag."""
    return [Preview[val] for val in v]
@@ -519,7 +515,7 @@ def main( # noqa: C901
    ctx: click.Context,
    code: Optional[str],
    line_length: int,
    target_version: List[TargetVersion],
    target_version: list[TargetVersion],
    check: bool,
    diff: bool,
    line_ranges: Sequence[str],
@@ -533,7 +529,7 @@ def main( # noqa: C901
    skip_magic_trailing_comma: bool,
    preview: bool,
    unstable: bool,
    enable_unstable_feature: List[Preview],
    enable_unstable_feature: list[Preview],
    quiet: bool,
    verbose: bool,
    required_version: Optional[str],
@@ -543,7 +539,7 @@ def main( # noqa: C901
    force_exclude: Optional[Pattern[str]],
    stdin_filename: Optional[str],
    workers: Optional[int],
    src: Tuple[str, ...],
    src: tuple[str, ...],
    config: Optional[str],
) -> None:
    """The uncompromising code formatter."""
@@ -643,7 +639,7 @@ def main( # noqa: C901
        enabled_features=set(enable_unstable_feature),
    )

    lines: List[Tuple[int, int]] = []
    lines: list[tuple[int, int]] = []
    if line_ranges:
        if ipynb:
            err("Cannot use --line-ranges with ipynb files.")
@@ -733,7 +729,7 @@ def main( # noqa: C901
def get_sources(
    *,
    root: Path,
    src: Tuple[str, ...],
    src: tuple[str, ...],
    quiet: bool,
    verbose: bool,
    include: Pattern[str],
@@ -742,14 +738,14 @@ def get_sources(
    force_exclude: Optional[Pattern[str]],
    report: "Report",
    stdin_filename: Optional[str],
) -> Set[Path]:
) -> set[Path]:
    """Compute the set of files to be formatted."""
    sources: Set[Path] = set()
    sources: set[Path] = set()

    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
    using_default_exclude = exclude is None
    exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
    gitignore: Optional[Dict[Path, PathSpec]] = None
    gitignore: Optional[dict[Path, PathSpec]] = None
    root_gitignore = get_gitignore(root)

    for s in src:
@ -841,7 +837,7 @@ def reformat_code(
|
||||
mode: Mode,
|
||||
report: Report,
|
||||
*,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> None:
|
||||
"""
|
||||
Reformat and print out `content` without spawning child processes.
|
||||
@ -874,7 +870,7 @@ def reformat_one(
|
||||
mode: Mode,
|
||||
report: "Report",
|
||||
*,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> None:
|
||||
"""Reformat a single file under `src` without spawning child processes.
|
||||
|
||||
@ -930,7 +926,7 @@ def format_file_in_place(
|
||||
write_back: WriteBack = WriteBack.NO,
|
||||
lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
|
||||
*,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> bool:
|
||||
"""Format file under `src` path. Return True if changed.
|
||||
|
||||
@ -997,7 +993,7 @@ def format_stdin_to_stdout(
|
||||
content: Optional[str] = None,
|
||||
write_back: WriteBack = WriteBack.NO,
|
||||
mode: Mode,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> bool:
|
||||
"""Format file on stdin. Return True if changed.
|
||||
|
||||
@ -1048,7 +1044,7 @@ def check_stability_and_equivalence(
|
||||
dst_contents: str,
|
||||
*,
|
||||
mode: Mode,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> None:
|
||||
"""Perform stability and equivalence checks.
|
||||
|
||||
@ -1065,7 +1061,7 @@ def format_file_contents(
|
||||
*,
|
||||
fast: bool,
|
||||
mode: Mode,
|
||||
lines: Collection[Tuple[int, int]] = (),
|
||||
lines: Collection[tuple[int, int]] = (),
|
||||
) -> FileContent:
|
||||
"""Reformat contents of a file and return new contents.
|
||||
|
||||
@ -1196,7 +1192,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileCon
|
||||
|
||||
|
||||
def format_str(
|
||||
src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
|
||||
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
|
||||
) -> str:
|
||||
"""Reformat a string and return new contents.
|
||||
|
||||
@ -1243,10 +1239,10 @@ def f(
|
||||
|
||||
|
||||
def _format_str_once(
|
||||
src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
|
||||
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
|
||||
) -> str:
|
||||
src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
|
||||
dst_blocks: List[LinesBlock] = []
|
||||
dst_blocks: list[LinesBlock] = []
|
||||
if mode.target_versions:
|
||||
versions = mode.target_versions
|
||||
else:
|
||||
@ -1296,7 +1292,7 @@ def _format_str_once(
|
||||
return "".join(dst_contents)
|
||||
|
||||
|
||||
def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
|
||||
def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:
|
||||
"""Return a tuple of (decoded_contents, encoding, newline).
|
||||
|
||||
`newline` is either CRLF or LF but `decoded_contents` is decoded with
|
||||
@ -1314,8 +1310,8 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
|
||||
|
||||
|
||||
def get_features_used( # noqa: C901
|
||||
node: Node, *, future_imports: Optional[Set[str]] = None
|
||||
) -> Set[Feature]:
|
||||
node: Node, *, future_imports: Optional[set[str]] = None
|
||||
) -> set[Feature]:
|
||||
"""Return a set of (relatively) new Python features used in this file.
|
||||
|
||||
Currently looking for:
|
||||
@ -1333,7 +1329,7 @@ def get_features_used( # noqa: C901
|
||||
- except* clause;
|
||||
- variadic generics;
|
||||
"""
|
||||
features: Set[Feature] = set()
|
||||
features: set[Feature] = set()
|
||||
if future_imports:
|
||||
features |= {
|
||||
FUTURE_FLAG_TO_FEATURE[future_import]
|
||||
@ -1471,8 +1467,8 @@ def _contains_asexpr(node: Union[Node, Leaf]) -> bool:
|
||||
|
||||
|
||||
def detect_target_versions(
|
||||
node: Node, *, future_imports: Optional[Set[str]] = None
|
||||
) -> Set[TargetVersion]:
|
||||
node: Node, *, future_imports: Optional[set[str]] = None
|
||||
) -> set[TargetVersion]:
|
||||
"""Detect the version to target based on the nodes used."""
|
||||
features = get_features_used(node, future_imports=future_imports)
|
||||
return {
|
||||
@ -1480,11 +1476,11 @@ def detect_target_versions(
|
||||
}
|
||||
|
||||
|
||||
def get_future_imports(node: Node) -> Set[str]:
|
||||
def get_future_imports(node: Node) -> set[str]:
|
||||
"""Return a set of __future__ imports in the file."""
|
||||
imports: Set[str] = set()
|
||||
imports: set[str] = set()
|
||||
|
||||
def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
|
||||
def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
|
||||
for child in children:
|
||||
if isinstance(child, Leaf):
|
||||
if child.type == token.NAME:
|
||||
@ -1571,7 +1567,7 @@ def assert_equivalent(src: str, dst: str) -> None:
|
||||
|
||||
|
||||
def assert_stable(
|
||||
src: str, dst: str, mode: Mode, *, lines: Collection[Tuple[int, int]] = ()
|
||||
src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
|
||||
) -> None:
|
||||
"""Raise AssertionError if `dst` reformats differently the second time."""
|
||||
if lines:
|
||||
|
@@ -1,9 +1,9 @@
# Generated by make_width_table.py
# wcwidth 0.2.6
# Unicode 15.0.0
from typing import Final, List, Tuple
from typing import Final

WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
    (0, 0, 0),
    (1, 31, -1),
    (127, 159, -1),
@@ -1,7 +1,7 @@
"""Builds on top of nodes.py to track brackets."""

from dataclasses import dataclass, field
from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union
from typing import Final, Iterable, Optional, Sequence, Union

from black.nodes import (
    BRACKET,
@ -60,12 +60,12 @@ class BracketTracker:
|
||||
"""Keeps track of brackets on a line."""
|
||||
|
||||
depth: int = 0
|
||||
bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
|
||||
delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
|
||||
bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
|
||||
delimiters: dict[LeafID, Priority] = field(default_factory=dict)
|
||||
previous: Optional[Leaf] = None
|
||||
_for_loop_depths: List[int] = field(default_factory=list)
|
||||
_lambda_argument_depths: List[int] = field(default_factory=list)
|
||||
invisible: List[Leaf] = field(default_factory=list)
|
||||
_for_loop_depths: list[int] = field(default_factory=list)
|
||||
_lambda_argument_depths: list[int] = field(default_factory=list)
|
||||
invisible: list[Leaf] = field(default_factory=list)
|
||||
|
||||
def mark(self, leaf: Leaf) -> None:
|
||||
"""Mark `leaf` with bracket-related metadata. Keep track of delimiters.
|
||||
@ -353,7 +353,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
|
||||
return 0
|
||||
|
||||
|
||||
def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
|
||||
def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
|
||||
"""Return leaves that are inside matching brackets.
|
||||
|
||||
The input `leaves` can have non-matching brackets at the head or tail parts.
|
||||
|
@@ -7,7 +7,7 @@
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Iterable, NamedTuple, Set, Tuple
from typing import Iterable, NamedTuple

from platformdirs import user_cache_dir
@ -55,7 +55,7 @@ def get_cache_file(mode: Mode) -> Path:
|
||||
class Cache:
|
||||
mode: Mode
|
||||
cache_file: Path
|
||||
file_data: Dict[str, FileData] = field(default_factory=dict)
|
||||
file_data: dict[str, FileData] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def read(cls, mode: Mode) -> Self:
|
||||
@ -76,7 +76,7 @@ def read(cls, mode: Mode) -> Self:
|
||||
|
||||
with cache_file.open("rb") as fobj:
|
||||
try:
|
||||
data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
|
||||
data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
|
||||
file_data = {k: FileData(*v) for k, v in data.items()}
|
||||
except (pickle.UnpicklingError, ValueError, IndexError):
|
||||
return cls(mode, cache_file)
|
||||
@ -114,14 +114,14 @@ def is_changed(self, source: Path) -> bool:
|
||||
return True
|
||||
return False
|
||||
|
||||
def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
|
||||
def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
|
||||
"""Split an iterable of paths in `sources` into two sets.
|
||||
|
||||
The first contains paths of files that modified on disk or are not in the
|
||||
cache. The other contains paths to non-modified files.
|
||||
"""
|
||||
changed: Set[Path] = set()
|
||||
done: Set[Path] = set()
|
||||
changed: set[Path] = set()
|
||||
done: set[Path] = set()
|
||||
for src in sources:
|
||||
if self.is_changed(src):
|
||||
changed.add(src)
|
||||
@ -140,7 +140,7 @@ def write(self, sources: Iterable[Path]) -> None:
|
||||
dir=str(self.cache_file.parent), delete=False
|
||||
) as f:
|
||||
# We store raw tuples in the cache because it's faster.
|
||||
data: Dict[str, Tuple[float, int, str]] = {
|
||||
data: dict[str, tuple[float, int, str]] = {
|
||||
k: (*v,) for k, v in self.file_data.items()
|
||||
}
|
||||
pickle.dump(data, f, protocol=4)
|
||||
|
@@ -1,7 +1,7 @@
import re
from dataclasses import dataclass
from functools import lru_cache
from typing import Collection, Final, Iterator, List, Optional, Tuple, Union
from typing import Collection, Final, Iterator, Optional, Union

from black.mode import Mode, Preview
from black.nodes import (
@ -77,9 +77,9 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]:
|
||||
|
||||
|
||||
@lru_cache(maxsize=4096)
|
||||
def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
|
||||
def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
|
||||
"""Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
|
||||
result: List[ProtoComment] = []
|
||||
result: list[ProtoComment] = []
|
||||
if not prefix or "#" not in prefix:
|
||||
return result
|
||||
|
||||
@ -166,7 +166,7 @@ def make_comment(content: str) -> str:
|
||||
|
||||
|
||||
def normalize_fmt_off(
|
||||
node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
|
||||
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
|
||||
) -> None:
|
||||
"""Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
|
||||
try_again = True
|
||||
@ -175,7 +175,7 @@ def normalize_fmt_off(
|
||||
|
||||
|
||||
def convert_one_fmt_off_pair(
|
||||
node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
|
||||
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
|
||||
) -> bool:
|
||||
"""Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
|
||||
|
||||
@ -336,7 +336,7 @@ def _generate_ignored_nodes_from_fmt_skip(
|
||||
# statements. The ignored nodes should be previous siblings of the
|
||||
# parent suite node.
|
||||
leaf.prefix = ""
|
||||
ignored_nodes: List[LN] = []
|
||||
ignored_nodes: list[LN] = []
|
||||
parent_sibling = parent.prev_sibling
|
||||
while parent_sibling is not None and parent_sibling.type != syms.suite:
|
||||
ignored_nodes.insert(0, parent_sibling)
|
||||
@ -376,7 +376,7 @@ def children_contains_fmt_on(container: LN) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
|
||||
def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
|
||||
"""
|
||||
Returns:
|
||||
True iff one of the comments in @comment_list is a pragma used by one
|
||||
|
@@ -13,7 +13,7 @@
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import Manager
from pathlib import Path
from typing import Any, Iterable, Optional, Set
from typing import Any, Iterable, Optional

from mypy_extensions import mypyc_attr
@ -69,7 +69,7 @@ def shutdown(loop: asyncio.AbstractEventLoop) -> None:
|
||||
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
|
||||
@mypyc_attr(patchable=True)
|
||||
def reformat_many(
|
||||
sources: Set[Path],
|
||||
sources: set[Path],
|
||||
fast: bool,
|
||||
write_back: WriteBack,
|
||||
mode: Mode,
|
||||
@ -119,7 +119,7 @@ def reformat_many(
|
||||
|
||||
|
||||
async def schedule_formatting(
|
||||
sources: Set[Path],
|
||||
sources: set[Path],
|
||||
fast: bool,
|
||||
write_back: WriteBack,
|
||||
mode: Mode,
|
||||
|
@@ -1,5 +1,5 @@
from dataclasses import dataclass, field
from typing import Any, Iterator, List, TypeVar, Union
from typing import Any, Iterator, TypeVar, Union

from black.nodes import Visitor
from black.output import out
@ -14,7 +14,7 @@
|
||||
@dataclass
|
||||
class DebugVisitor(Visitor[T]):
|
||||
tree_depth: int = 0
|
||||
list_output: List[str] = field(default_factory=list)
|
||||
list_output: list[str] = field(default_factory=list)
|
||||
print_output: bool = True
|
||||
|
||||
def out(self, message: str, *args: Any, **kwargs: Any) -> None:
|
||||
|
@@ -6,14 +6,11 @@
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Pattern,
    Sequence,
    Tuple,
    Union,
)
@ -43,7 +40,7 @@
|
||||
|
||||
|
||||
@lru_cache
|
||||
def _load_toml(path: Union[Path, str]) -> Dict[str, Any]:
|
||||
def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
|
||||
with open(path, "rb") as f:
|
||||
return tomllib.load(f)
|
||||
|
||||
@ -56,7 +53,7 @@ def _cached_resolve(path: Path) -> Path:
|
||||
@lru_cache
|
||||
def find_project_root(
|
||||
srcs: Sequence[str], stdin_filename: Optional[str] = None
|
||||
) -> Tuple[Path, str]:
|
||||
) -> tuple[Path, str]:
|
||||
"""Return a directory containing .git, .hg, or pyproject.toml.
|
||||
|
||||
pyproject.toml files are only considered if they contain a [tool.black]
|
||||
@ -106,7 +103,7 @@ def find_project_root(
|
||||
|
||||
|
||||
def find_pyproject_toml(
|
||||
path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
|
||||
path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
|
||||
) -> Optional[str]:
|
||||
"""Find the absolute filepath to a pyproject.toml if it exists"""
|
||||
path_project_root, _ = find_project_root(path_search_start, stdin_filename)
|
||||
@ -128,13 +125,13 @@ def find_pyproject_toml(
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
|
||||
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
|
||||
def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
|
||||
"""Parse a pyproject toml file, pulling out relevant parts for Black.
|
||||
|
||||
If parsing fails, will raise a tomllib.TOMLDecodeError.
|
||||
"""
|
||||
pyproject_toml = _load_toml(path_config)
|
||||
config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
|
||||
config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
|
||||
config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
|
||||
|
||||
if "target_version" not in config:
|
||||
@ -146,8 +143,8 @@ def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
|
||||
|
||||
|
||||
def infer_target_version(
|
||||
pyproject_toml: Dict[str, Any],
|
||||
) -> Optional[List[TargetVersion]]:
|
||||
pyproject_toml: dict[str, Any],
|
||||
) -> Optional[list[TargetVersion]]:
|
||||
"""Infer Black's target version from the project metadata in pyproject.toml.
|
||||
|
||||
Supports the PyPA standard format (PEP 621):
|
||||
@ -170,7 +167,7 @@ def infer_target_version(
|
||||
return None
|
||||
|
||||
|
||||
def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
|
||||
def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
|
||||
"""Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.
|
||||
|
||||
If parsing fails, will raise a packaging.version.InvalidVersion error.
|
||||
@ -185,7 +182,7 @@ def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersio
|
||||
return None
|
||||
|
||||
|
||||
def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
|
||||
def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
|
||||
"""Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.
|
||||
|
||||
If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
|
||||
@ -196,7 +193,7 @@ def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVers
|
||||
return None
|
||||
|
||||
target_version_map = {f"3.{v.value}": v for v in TargetVersion}
|
||||
compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
|
||||
compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
|
||||
if compatible_versions:
|
||||
return [target_version_map[v] for v in compatible_versions]
|
||||
return None
|
||||
@ -251,7 +248,7 @@ def find_user_pyproject_toml() -> Path:
|
||||
def get_gitignore(root: Path) -> PathSpec:
|
||||
"""Return a PathSpec matching gitignore content if present."""
|
||||
gitignore = root / ".gitignore"
|
||||
lines: List[str] = []
|
||||
lines: list[str] = []
|
||||
if gitignore.is_file():
|
||||
with gitignore.open(encoding="utf-8") as gf:
|
||||
lines = gf.readlines()
|
||||
@ -302,7 +299,7 @@ def best_effort_relative_path(path: Path, root: Path) -> Path:
|
||||
def _path_is_ignored(
|
||||
root_relative_path: str,
|
||||
root: Path,
|
||||
gitignore_dict: Dict[Path, PathSpec],
|
||||
gitignore_dict: dict[Path, PathSpec],
|
||||
) -> bool:
|
||||
path = root / root_relative_path
|
||||
# Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
|
||||
@ -335,7 +332,7 @@ def gen_python_files(
|
||||
extend_exclude: Optional[Pattern[str]],
|
||||
force_exclude: Optional[Pattern[str]],
|
||||
report: Report,
|
||||
gitignore_dict: Optional[Dict[Path, PathSpec]],
|
||||
gitignore_dict: Optional[dict[Path, PathSpec]],
|
||||
*,
|
||||
verbose: bool,
|
||||
quiet: bool,
|
||||
|
@@ -7,7 +7,7 @@
import sys
from functools import lru_cache
from importlib.util import find_spec
from typing import Dict, List, Optional, Tuple
from typing import Optional

if sys.version_info >= (3, 10):
    from typing import TypeGuard
@ -64,7 +64,7 @@ def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
|
||||
return installed
|
||||
|
||||
|
||||
def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
|
||||
def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
|
||||
"""Remove trailing semicolon from Jupyter notebook cell.
|
||||
|
||||
For example,
|
||||
@ -120,7 +120,7 @@ def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
|
||||
return str(tokens_to_src(tokens))
|
||||
|
||||
|
||||
def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
|
||||
def mask_cell(src: str) -> tuple[str, list[Replacement]]:
|
||||
"""Mask IPython magics so content becomes parseable Python code.
|
||||
|
||||
For example,
|
||||
@ -135,7 +135,7 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
|
||||
|
||||
The replacements are returned, along with the transformed code.
|
||||
"""
|
||||
replacements: List[Replacement] = []
|
||||
replacements: list[Replacement] = []
|
||||
try:
|
||||
ast.parse(src)
|
||||
except SyntaxError:
|
||||
@ -186,7 +186,7 @@ def get_token(src: str, magic: str) -> str:
|
||||
return f'"{token}"'
|
||||
|
||||
|
||||
def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
|
||||
def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
|
||||
"""Replace cell magic with token.
|
||||
|
||||
Note that 'src' will already have been processed by IPython's
|
||||
@ -203,7 +203,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
|
||||
|
||||
The replacement, along with the transformed code, is returned.
|
||||
"""
|
||||
replacements: List[Replacement] = []
|
||||
replacements: list[Replacement] = []
|
||||
|
||||
tree = ast.parse(src)
|
||||
|
||||
@ -217,7 +217,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
|
||||
return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
|
||||
|
||||
|
||||
def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
|
||||
def replace_magics(src: str) -> tuple[str, list[Replacement]]:
|
||||
"""Replace magics within body of cell.
|
||||
|
||||
Note that 'src' will already have been processed by IPython's
|
||||
@ -258,7 +258,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
|
||||
return "\n".join(new_srcs), replacements
|
||||
|
||||
|
||||
def unmask_cell(src: str, replacements: List[Replacement]) -> str:
|
||||
def unmask_cell(src: str, replacements: list[Replacement]) -> str:
|
||||
"""Remove replacements from cell.
|
||||
|
||||
For example
|
||||
@ -291,7 +291,7 @@ def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
|
||||
)
|
||||
|
||||
|
||||
def _get_str_args(args: List[ast.expr]) -> List[str]:
|
||||
def _get_str_args(args: list[ast.expr]) -> list[str]:
|
||||
str_args = []
|
||||
for arg in args:
|
||||
assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
|
||||
@ -375,7 +375,7 @@ class MagicFinder(ast.NodeVisitor):
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
|
||||
self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)
|
||||
|
||||
def visit_Assign(self, node: ast.Assign) -> None:
|
||||
"""Look for system assign magics.
|
||||
|
@@ -7,7 +7,7 @@
from dataclasses import replace
from enum import Enum, auto
from functools import partial, wraps
from typing import Collection, Iterator, List, Optional, Set, Union, cast
from typing import Collection, Iterator, Optional, Union, cast

from black.brackets import (
    COMMA_PRIORITY,
@ -197,7 +197,7 @@ def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
|
||||
yield from self.line(-1)
|
||||
|
||||
def visit_stmt(
|
||||
self, node: Node, keywords: Set[str], parens: Set[str]
|
||||
self, node: Node, keywords: set[str], parens: set[str]
|
||||
) -> Iterator[Line]:
|
||||
"""Visit a statement.
|
||||
|
||||
@ -559,7 +559,7 @@ def __post_init__(self) -> None:
|
||||
self.current_line = Line(mode=self.mode)
|
||||
|
||||
v = self.visit_stmt
|
||||
Ø: Set[str] = set()
|
||||
Ø: set[str] = set()
|
||||
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
|
||||
self.visit_if_stmt = partial(
|
||||
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
|
||||
@ -626,7 +626,7 @@ def transform_line(
|
||||
string_split = StringSplitter(ll, sn)
|
||||
string_paren_wrap = StringParenWrapper(ll, sn)
|
||||
|
||||
transformers: List[Transformer]
|
||||
transformers: list[Transformer]
|
||||
if (
|
||||
not line.contains_uncollapsable_type_comments()
|
||||
and not line.should_split_rhs
|
||||
@ -726,7 +726,7 @@ def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
|
||||
"""If a funcdef has a magic trailing comma in the return type, then we should first
|
||||
split the line with rhs to respect the comma.
|
||||
"""
|
||||
return_type_leaves: List[Leaf] = []
|
||||
return_type_leaves: list[Leaf] = []
|
||||
in_return_type = False
|
||||
|
||||
for leaf in line.leaves:
|
||||
@ -768,9 +768,9 @@ def left_hand_split(
|
||||
Prefer RHS otherwise. This is why this function is not symmetrical with
|
||||
:func:`right_hand_split` which also handles optional parentheses.
|
||||
"""
|
||||
tail_leaves: List[Leaf] = []
|
||||
body_leaves: List[Leaf] = []
|
||||
head_leaves: List[Leaf] = []
|
||||
tail_leaves: list[Leaf] = []
|
||||
body_leaves: list[Leaf] = []
|
||||
head_leaves: list[Leaf] = []
|
||||
current_leaves = head_leaves
|
||||
matching_bracket: Optional[Leaf] = None
|
||||
for leaf in line.leaves:
|
||||
@ -836,9 +836,9 @@ def _first_right_hand_split(
|
||||
_maybe_split_omitting_optional_parens to get an opinion whether to prefer
|
||||
splitting on the right side of an assignment statement.
|
||||
"""
|
||||
tail_leaves: List[Leaf] = []
|
||||
body_leaves: List[Leaf] = []
|
||||
head_leaves: List[Leaf] = []
|
||||
tail_leaves: list[Leaf] = []
|
||||
body_leaves: list[Leaf] = []
|
||||
head_leaves: list[Leaf] = []
|
||||
current_leaves = tail_leaves
|
||||
opening_bracket: Optional[Leaf] = None
|
||||
closing_bracket: Optional[Leaf] = None
|
||||
@ -869,8 +869,8 @@ def _first_right_hand_split(
|
||||
and tail_leaves[0].opening_bracket is head_leaves[-1]
|
||||
):
|
||||
inner_body_leaves = list(body_leaves)
|
||||
hugged_opening_leaves: List[Leaf] = []
|
||||
hugged_closing_leaves: List[Leaf] = []
|
||||
hugged_opening_leaves: list[Leaf] = []
|
||||
hugged_closing_leaves: list[Leaf] = []
|
||||
is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
|
||||
unpacking_offset: int = 1 if is_unpacking else 0
|
||||
while (
|
||||
@ -1080,7 +1080,7 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None
|
||||
|
||||
|
||||
def _ensure_trailing_comma(
|
||||
leaves: List[Leaf], original: Line, opening_bracket: Leaf
|
||||
leaves: list[Leaf], original: Line, opening_bracket: Leaf
|
||||
) -> bool:
|
||||
if not leaves:
|
||||
return False
|
||||
@ -1121,7 +1121,7 @@ def _ensure_trailing_comma(
|
||||
|
||||
|
||||
def bracket_split_build_line(
|
||||
leaves: List[Leaf],
|
||||
leaves: list[Leaf],
|
||||
original: Line,
|
||||
opening_bracket: Leaf,
|
||||
*,
|
||||
@ -1150,7 +1150,7 @@ def bracket_split_build_line(
|
||||
leaves.insert(i + 1, new_comma)
|
||||
break
|
||||
|
||||
leaves_to_track: Set[LeafID] = set()
|
||||
leaves_to_track: set[LeafID] = set()
|
||||
if component is _BracketSplitComponent.head:
|
||||
leaves_to_track = get_leaves_inside_matching_brackets(leaves)
|
||||
# Populate the line
|
||||
@ -1342,7 +1342,7 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]:
|
||||
|
||||
|
||||
def normalize_invisible_parens( # noqa: C901
|
||||
node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature]
|
||||
node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
|
||||
) -> None:
|
||||
"""Make existing optional parentheses invisible or create new ones.
|
||||
|
||||
@ -1692,7 +1692,7 @@ def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
|
||||
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
|
||||
"""Generate sets of closing bracket IDs that should be omitted in a RHS.
|
||||
|
||||
Brackets can be omitted if the entire trailer up to and including
|
||||
@ -1703,14 +1703,14 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf
|
||||
the one that needs to explode are omitted.
|
||||
"""
|
||||
|
||||
omit: Set[LeafID] = set()
|
||||
omit: set[LeafID] = set()
|
||||
if not line.magic_trailing_comma:
|
||||
yield omit
|
||||
|
||||
length = 4 * line.depth
|
||||
opening_bracket: Optional[Leaf] = None
|
||||
closing_bracket: Optional[Leaf] = None
|
||||
inner_brackets: Set[LeafID] = set()
|
||||
inner_brackets: set[LeafID] = set()
|
||||
for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
|
||||
length += leaf_length
|
||||
if length > line_length:
|
||||
@ -1775,10 +1775,10 @@ def run_transformer(
|
||||
features: Collection[Feature],
|
||||
*,
|
||||
line_str: str = "",
|
||||
) -> List[Line]:
|
||||
) -> list[Line]:
|
||||
if not line_str:
|
||||
line_str = line_to_string(line)
|
||||
result: List[Line] = []
|
||||
result: list[Line] = []
|
||||
for transformed_line in transform(line, features, mode):
|
||||
if str(transformed_line).strip("\n") == line_str:
|
||||
raise CannotTransform("Line transformer returned an unchanged result")
|
||||
|
@@ -1,18 +1,7 @@
import itertools
import math
from dataclasses import dataclass, field
from typing import (
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
    cast,
)
from typing import Callable, Iterator, Optional, Sequence, TypeVar, Union, cast

from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
from black.mode import Mode, Preview
@ -52,9 +41,9 @@ class Line:
|
||||
|
||||
mode: Mode = field(repr=False)
|
||||
depth: int = 0
|
||||
leaves: List[Leaf] = field(default_factory=list)
|
||||
leaves: list[Leaf] = field(default_factory=list)
|
||||
# keys ordered like `leaves`
|
||||
comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
|
||||
comments: dict[LeafID, list[Leaf]] = field(default_factory=dict)
|
||||
bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
|
||||
inside_brackets: bool = False
|
||||
should_split_rhs: bool = False
|
||||
@ -426,7 +415,7 @@ def append_comment(self, comment: Leaf) -> bool:
|
||||
self.comments.setdefault(id(last_leaf), []).append(comment)
|
||||
return True
|
||||
|
||||
def comments_after(self, leaf: Leaf) -> List[Leaf]:
|
||||
def comments_after(self, leaf: Leaf) -> list[Leaf]:
|
||||
"""Generate comments that should appear directly after `leaf`."""
|
||||
return self.comments.get(id(leaf), [])
|
||||
|
||||
@ -459,13 +448,13 @@ def is_complex_subscript(self, leaf: Leaf) -> bool:
|
||||
|
||||
def enumerate_with_length(
|
||||
self, is_reversed: bool = False
|
||||
) -> Iterator[Tuple[Index, Leaf, int]]:
|
||||
) -> Iterator[tuple[Index, Leaf, int]]:
|
||||
"""Return an enumeration of leaves with their length.
|
||||
|
||||
Stops prematurely on multiline strings and standalone comments.
|
||||
"""
|
||||
op = cast(
|
||||
Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
|
||||
Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
|
||||
enumerate_reversed if is_reversed else enumerate,
|
||||
)
|
||||
for index, leaf in op(self.leaves):
|
||||
@ -531,11 +520,11 @@ class LinesBlock:
|
||||
previous_block: Optional["LinesBlock"]
|
||||
original_line: Line
|
||||
before: int = 0
|
||||
content_lines: List[str] = field(default_factory=list)
|
||||
content_lines: list[str] = field(default_factory=list)
|
||||
after: int = 0
|
||||
form_feed: bool = False
|
||||
|
||||
def all_lines(self) -> List[str]:
|
||||
def all_lines(self) -> list[str]:
|
||||
empty_line = str(Line(mode=self.mode))
|
||||
prefix = make_simple_prefix(self.before, self.form_feed, empty_line)
|
||||
return [prefix] + self.content_lines + [empty_line * self.after]
|
||||
@ -554,7 +543,7 @@ class EmptyLineTracker:
|
||||
mode: Mode
|
||||
previous_line: Optional[Line] = None
|
||||
previous_block: Optional[LinesBlock] = None
|
||||
previous_defs: List[Line] = field(default_factory=list)
|
||||
previous_defs: list[Line] = field(default_factory=list)
|
||||
semantic_leading_comment: Optional[LinesBlock] = None
|
||||
|
||||
def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
|
||||
@ -607,7 +596,7 @@ def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
|
||||
self.previous_block = block
|
||||
return block
|
||||
|
||||
def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: # noqa: C901
|
||||
def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]: # noqa: C901
|
||||
max_allowed = 1
|
||||
if current_line.depth == 0:
|
||||
max_allowed = 1 if self.mode.is_pyi else 2
|
||||
@ -693,7 +682,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: # noqa: C9
|
||||
|
||||
def _maybe_empty_lines_for_class_or_def( # noqa: C901
|
||||
self, current_line: Line, before: int, user_had_newline: bool
|
||||
) -> Tuple[int, int]:
|
||||
) -> tuple[int, int]:
|
||||
assert self.previous_line is not None
|
||||
|
||||
if self.previous_line.is_decorator:
|
||||
@ -772,7 +761,7 @@ def _maybe_empty_lines_for_class_or_def( # noqa: C901
|
||||
return newlines, 0
|
||||
|
||||
|
||||
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
|
||||
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
|
||||
"""Like `reversed(enumerate(sequence))` if that were possible."""
|
||||
index = len(sequence) - 1
|
||||
for element in reversed(sequence):
|
||||
@ -781,7 +770,7 @@ def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
|
||||
|
||||
|
||||
def append_leaves(
|
||||
new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
|
||||
new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Append leaves (taken from @old_line) to @new_line, making sure to fix the
|
||||
@ -838,10 +827,10 @@ def is_line_short_enough( # noqa: C901
|
||||
# Depth (which is based on the existing bracket_depth concept)
|
||||
# is needed to determine nesting level of the MLS.
|
||||
# Includes special case for trailing commas.
|
||||
commas: List[int] = [] # tracks number of commas per depth level
|
||||
commas: list[int] = [] # tracks number of commas per depth level
|
||||
multiline_string: Optional[Leaf] = None
|
||||
# store the leaves that contain parts of the MLS
|
||||
multiline_string_contexts: List[LN] = []
|
||||
multiline_string_contexts: list[LN] = []
|
||||
|
||||
max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS
|
||||
for i, leaf in enumerate(line.leaves):
|
||||
@ -865,7 +854,7 @@ def is_line_short_enough( # noqa: C901
|
||||
if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
|
||||
# Inside brackets, ignore trailing comma
|
||||
# directly after MLS/MLS-containing expression
|
||||
ignore_ctxs: List[Optional[LN]] = [None]
|
||||
ignore_ctxs: list[Optional[LN]] = [None]
|
||||
ignore_ctxs += multiline_string_contexts
|
||||
if (line.inside_brackets or leaf.bracket_depth > 0) and (
|
||||
i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs
|
||||
|
@@ -8,7 +8,7 @@
from enum import Enum, auto
from hashlib import sha256
from operator import attrgetter
from typing import Dict, Final, Set
from typing import Final

from black.const import DEFAULT_LINE_LENGTH
@ -64,7 +64,7 @@ class Feature(Enum):
|
||||
}
|
||||
|
||||
|
||||
VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
|
||||
VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
|
||||
TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
|
||||
TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
|
||||
TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
|
||||
@ -189,7 +189,7 @@ class Feature(Enum):
|
||||
}
|
||||
|
||||
|
||||
def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
|
||||
def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
|
||||
return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
|
||||
|
||||
|
||||
@ -213,7 +213,7 @@ class Preview(Enum):
|
||||
pep646_typed_star_arg_type_var_tuple = auto()
|
||||
|
||||
|
||||
UNSTABLE_FEATURES: Set[Preview] = {
|
||||
UNSTABLE_FEATURES: set[Preview] = {
|
||||
# Many issues, see summary in https://github.com/psf/black/issues/4042
|
||||
Preview.string_processing,
|
||||
# See issues #3452 and #4158
|
||||
@ -234,17 +234,17 @@ class Deprecated(UserWarning):
|
||||
|
||||
@dataclass
|
||||
class Mode:
|
||||
target_versions: Set[TargetVersion] = field(default_factory=set)
|
||||
target_versions: set[TargetVersion] = field(default_factory=set)
|
||||
line_length: int = DEFAULT_LINE_LENGTH
|
||||
string_normalization: bool = True
|
||||
is_pyi: bool = False
|
||||
is_ipynb: bool = False
|
||||
skip_source_first_line: bool = False
|
||||
magic_trailing_comma: bool = True
|
||||
python_cell_magics: Set[str] = field(default_factory=set)
|
||||
python_cell_magics: set[str] = field(default_factory=set)
|
||||
preview: bool = False
|
||||
unstable: bool = False
|
||||
enabled_features: Set[Preview] = field(default_factory=set)
|
||||
enabled_features: set[Preview] = field(default_factory=set)
|
||||
|
||||
def __contains__(self, feature: Preview) -> bool:
|
||||
"""
|
||||
|
@@ -3,18 +3,7 @@
"""

import sys
from typing import (
    Final,
    Generic,
    Iterator,
    List,
    Literal,
    Optional,
    Set,
    Tuple,
    TypeVar,
    Union,
)
from typing import Final, Generic, Iterator, Literal, Optional, TypeVar, Union

if sys.version_info >= (3, 10):
    from typing import TypeGuard
@ -462,7 +451,7 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
|
||||
return None
|
||||
|
||||
|
||||
def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
|
||||
def prev_siblings_are(node: Optional[LN], tokens: list[Optional[NodeType]]) -> bool:
|
||||
"""Return if the `node` and its previous siblings match types against the provided
|
||||
list of tokens; the provided `node`has its type matched against the last element in
|
||||
the list. `None` can be used as the first element to declare that the start of the
|
||||
@ -634,8 +623,8 @@ def is_tuple_containing_walrus(node: LN) -> bool:
|
||||
def is_one_sequence_between(
|
||||
opening: Leaf,
|
||||
closing: Leaf,
|
||||
leaves: List[Leaf],
|
||||
brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
|
||||
leaves: list[Leaf],
|
||||
brackets: tuple[int, int] = (token.LPAR, token.RPAR),
|
||||
) -> bool:
|
||||
"""Return True if content between `opening` and `closing` is a one-sequence."""
|
||||
if (opening.type, closing.type) != brackets:
|
||||
@ -745,7 +734,7 @@ def is_yield(node: LN) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
|
||||
def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
|
||||
"""Return True if `leaf` is a star or double star in a vararg or kwarg.
|
||||
|
||||
If `within` includes VARARGS_PARENTS, this applies to function signatures.
|
||||
|
@@ -6,7 +6,7 @@
import json
import re
import tempfile
from typing import Any, List, Optional
from typing import Any, Optional

from click import echo, style
from mypy_extensions import mypyc_attr
@ -59,7 +59,7 @@ def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
|
||||
_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")
|
||||
|
||||
|
||||
def _splitlines_no_ff(source: str) -> List[str]:
|
||||
def _splitlines_no_ff(source: str) -> list[str]:
|
||||
"""Split a string into lines ignoring form feed and other chars.
|
||||
|
||||
This mimics how the Python parser splits source code.
|
||||
|
@@ -5,7 +5,7 @@
import ast
import sys
import warnings
from typing import Collection, Iterator, List, Set, Tuple
from typing import Collection, Iterator

from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
from black.nodes import syms
@ -21,7 +21,7 @@ class InvalidInput(ValueError):
|
||||
"""Raised when input source code fails all parse attempts."""
|
||||
|
||||
|
||||
def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
|
||||
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
|
||||
if not target_versions:
|
||||
# No target_version specified, so try all grammars.
|
||||
return [
|
||||
@ -123,7 +123,7 @@ class ASTSafetyError(Exception):
|
||||
|
||||
|
||||
def _parse_single_version(
|
||||
src: str, version: Tuple[int, int], *, type_comments: bool
|
||||
src: str, version: tuple[int, int], *, type_comments: bool
|
||||
) -> ast.AST:
|
||||
filename = "<unknown>"
|
||||
with warnings.catch_warnings():
|
||||
@ -159,7 +159,7 @@ def parse_ast(src: str) -> ast.AST:
|
||||
def _normalize(lineend: str, value: str) -> str:
|
||||
# To normalize, we strip any leading and trailing space from
|
||||
# each line...
|
||||
stripped: List[str] = [i.strip() for i in value.splitlines()]
|
||||
stripped: list[str] = [i.strip() for i in value.splitlines()]
|
||||
normalized = lineend.join(stripped)
|
||||
# ...and remove any blank lines at the beginning and end of
|
||||
# the whole string
|
||||
@ -172,14 +172,14 @@ def stringify_ast(node: ast.AST) -> Iterator[str]:
|
||||
|
||||
|
||||
def _stringify_ast_with_new_parent(
|
||||
node: ast.AST, parent_stack: List[ast.AST], new_parent: ast.AST
|
||||
node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
|
||||
) -> Iterator[str]:
|
||||
parent_stack.append(new_parent)
|
||||
yield from _stringify_ast(node, parent_stack)
|
||||
parent_stack.pop()
|
||||
|
||||
|
||||
def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
|
||||
def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
|
||||
if (
|
||||
isinstance(node, ast.Constant)
|
||||
and isinstance(node.value, str)
|
||||
|
@@ -2,7 +2,7 @@

import difflib
from dataclasses import dataclass
from typing import Collection, Iterator, List, Sequence, Set, Tuple, Union
from typing import Collection, Iterator, Sequence, Union

from black.nodes import (
    LN,
@ -18,8 +18,8 @@
|
||||
from blib2to3.pgen2.token import ASYNC, NEWLINE
|
||||
|
||||
|
||||
def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
|
||||
lines: List[Tuple[int, int]] = []
|
||||
def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
|
||||
lines: list[tuple[int, int]] = []
|
||||
for lines_str in line_ranges:
|
||||
parts = lines_str.split("-")
|
||||
if len(parts) != 2:
|
||||
@ -40,14 +40,14 @@ def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
|
||||
return lines
|
||||
|
||||
|
||||
def is_valid_line_range(lines: Tuple[int, int]) -> bool:
|
||||
def is_valid_line_range(lines: tuple[int, int]) -> bool:
|
||||
"""Returns whether the line range is valid."""
|
||||
return not lines or lines[0] <= lines[1]
|
||||
|
||||
|
||||
def sanitized_lines(
|
||||
lines: Collection[Tuple[int, int]], src_contents: str
|
||||
) -> Collection[Tuple[int, int]]:
|
||||
lines: Collection[tuple[int, int]], src_contents: str
|
||||
) -> Collection[tuple[int, int]]:
|
||||
"""Returns the valid line ranges for the given source.
|
||||
|
||||
This removes ranges that are entirely outside the valid lines.
|
||||
@ -74,10 +74,10 @@ def sanitized_lines(
|
||||
|
||||
|
||||
def adjusted_lines(
|
||||
lines: Collection[Tuple[int, int]],
|
||||
lines: Collection[tuple[int, int]],
|
||||
original_source: str,
|
||||
modified_source: str,
|
||||
) -> List[Tuple[int, int]]:
|
||||
) -> list[tuple[int, int]]:
|
||||
"""Returns the adjusted line ranges based on edits from the original code.
|
||||
|
||||
This computes the new line ranges by diffing original_source and
|
||||
@ -153,7 +153,7 @@ def adjusted_lines(
|
||||
return new_lines
|
||||
|
||||
|
||||
def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) -> None:
|
||||
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
|
||||
"""Converts unchanged lines to STANDALONE_COMMENT.
|
||||
|
||||
The idea is similar to how `# fmt: on/off` is implemented. It also converts the
|
||||
@ -177,7 +177,7 @@ def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]])
|
||||
more formatting to pass (1). However, it's hard to get it correct when
|
||||
incorrect indentations are used. So we defer this to future optimizations.
|
||||
"""
|
||||
lines_set: Set[int] = set()
|
||||
lines_set: set[int] = set()
|
||||
for start, end in lines:
|
||||
lines_set.update(range(start, end + 1))
|
||||
visitor = _TopLevelStatementsVisitor(lines_set)
|
||||
@ -205,7 +205,7 @@ class _TopLevelStatementsVisitor(Visitor[None]):
|
||||
classes/functions/statements.
|
||||
"""
|
||||
|
||||
def __init__(self, lines_set: Set[int]):
|
||||
def __init__(self, lines_set: set[int]):
|
||||
self._lines_set = lines_set
|
||||
|
||||
def visit_simple_stmt(self, node: Node) -> Iterator[None]:
|
||||
@ -249,7 +249,7 @@ def visit_suite(self, node: Node) -> Iterator[None]:
|
||||
_convert_node_to_standalone_comment(semantic_parent)
|
||||
|
||||
|
||||
def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
|
||||
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
|
||||
"""Converts unchanged to STANDALONE_COMMENT line by line."""
|
||||
for leaf in node.leaves():
|
||||
if leaf.type != NEWLINE:
|
||||
@ -261,7 +261,7 @@ def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
|
||||
# match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
|
||||
# Here we need to check `subject_expr`. The `case_block+` will be
|
||||
# checked by their own NEWLINEs.
|
||||
nodes_to_ignore: List[LN] = []
|
||||
nodes_to_ignore: list[LN] = []
|
||||
prev_sibling = leaf.prev_sibling
|
||||
while prev_sibling:
|
||||
nodes_to_ignore.insert(0, prev_sibling)
|
||||
@ -382,7 +382,7 @@ def _leaf_line_end(leaf: Leaf) -> int:
|
||||
return leaf.lineno + str(leaf).count("\n")
|
||||
|
||||
|
||||
def _get_line_range(node_or_nodes: Union[LN, List[LN]]) -> Set[int]:
|
||||
def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]:
|
||||
"""Returns the line range of this node or list of nodes."""
|
||||
if isinstance(node_or_nodes, list):
|
||||
nodes = node_or_nodes
|
||||
@ -463,7 +463,7 @@ def _calculate_lines_mappings(
|
||||
modified_source.splitlines(keepends=True),
|
||||
)
|
||||
matching_blocks = matcher.get_matching_blocks()
|
||||
lines_mappings: List[_LinesMapping] = []
|
||||
lines_mappings: list[_LinesMapping] = []
|
||||
# matching_blocks is a sequence of "same block of code ranges", see
|
||||
# https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
|
||||
# Each block corresponds to a _LinesMapping with is_changed_block=False,
|
||||
|
@@ -5,7 +5,7 @@
import re
import sys
from functools import lru_cache
from typing import Final, List, Match, Pattern, Tuple
from typing import Final, Match, Pattern

from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf
@ -43,7 +43,7 @@ def has_triple_quotes(string: str) -> bool:
|
||||
return raw_string[:3] in {'"""', "'''"}
|
||||
|
||||
|
||||
def lines_with_leading_tabs_expanded(s: str) -> List[str]:
|
||||
def lines_with_leading_tabs_expanded(s: str) -> list[str]:
|
||||
"""
|
||||
Splits string into lines and expands only leading tabs (following the normal
|
||||
Python rules)
|
||||
@ -242,9 +242,9 @@ def normalize_string_quotes(s: str) -> str:
|
||||
|
||||
def normalize_fstring_quotes(
|
||||
quote: str,
|
||||
middles: List[Leaf],
|
||||
middles: list[Leaf],
|
||||
is_raw_fstring: bool,
|
||||
) -> Tuple[List[Leaf], str]:
|
||||
) -> tuple[list[Leaf], str]:
|
||||
"""Prefer double quotes but only if it doesn't cause more escaping.
|
||||
|
||||
Adds or removes backslashes as appropriate.
|
||||
|
@@ -11,16 +11,12 @@
    Callable,
    ClassVar,
    Collection,
    Dict,
    Final,
    Iterable,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    TypeVar,
    Union,
)
@ -68,7 +64,7 @@ class CannotTransform(Exception):
|
||||
ParserState = int
|
||||
StringID = int
|
||||
TResult = Result[T, CannotTransform] # (T)ransform Result
|
||||
TMatchResult = TResult[List[Index]]
|
||||
TMatchResult = TResult[list[Index]]
|
||||
|
||||
SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops
|
||||
|
||||
@ -179,7 +175,7 @@ def original_is_simple_lookup_func(
|
||||
return True
|
||||
|
||||
|
||||
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) -> bool:
|
||||
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
|
||||
"""
|
||||
Handling the determination of is_simple_lookup for the lines prior to the doublestar
|
||||
token. This is required because of the need to isolate the chained expression
|
||||
@ -202,7 +198,7 @@ def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int])
|
||||
|
||||
|
||||
def handle_is_simple_lookup_forward(
|
||||
line: Line, index: int, disallowed: Set[int]
|
||||
line: Line, index: int, disallowed: set[int]
|
||||
) -> bool:
|
||||
"""
|
||||
Handling decision is_simple_lookup for the lines behind the doublestar token.
|
||||
@ -227,7 +223,7 @@ def handle_is_simple_lookup_forward(
|
||||
return True
|
||||
|
||||
|
||||
def is_expression_chained(chained_leaves: List[Leaf]) -> bool:
|
||||
def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
|
||||
"""
|
||||
Function to determine if the variable is a chained call.
|
||||
(e.g., foo.lookup, foo().lookup, (foo.lookup())) will be recognized as chained call)
|
||||
@ -298,7 +294,7 @@ def do_match(self, line: Line) -> TMatchResult:
|
||||
|
||||
@abstractmethod
|
||||
def do_transform(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> Iterator[TResult[Line]]:
|
||||
"""
|
||||
Yields:
|
||||
@ -388,8 +384,8 @@ class CustomSplitMapMixin:
|
||||
the resultant substrings go over the configured max line length.
|
||||
"""
|
||||
|
||||
_Key: ClassVar = Tuple[StringID, str]
|
||||
_CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
|
||||
_Key: ClassVar = tuple[StringID, str]
|
||||
_CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict(
|
||||
tuple
|
||||
)
|
||||
|
||||
@ -413,7 +409,7 @@ def add_custom_splits(
|
||||
key = self._get_key(string)
|
||||
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
|
||||
|
||||
def pop_custom_splits(self, string: str) -> List[CustomSplit]:
|
||||
def pop_custom_splits(self, string: str) -> list[CustomSplit]:
|
||||
"""Custom Split Map Getter Method
|
||||
|
||||
Returns:
|
||||
@ -512,7 +508,7 @@ def do_match(self, line: Line) -> TMatchResult:
|
||||
return TErr("This line has no strings that need merging.")
|
||||
|
||||
def do_transform(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> Iterator[TResult[Line]]:
|
||||
new_line = line
|
||||
|
||||
@ -543,7 +539,7 @@ def do_transform(
|
||||
|
||||
@staticmethod
|
||||
def _remove_backslash_line_continuation_chars(
|
||||
line: Line, string_indices: List[int]
|
||||
line: Line, string_indices: list[int]
|
||||
) -> TResult[Line]:
|
||||
"""
|
||||
Merge strings that were split across multiple lines using
|
||||
@ -584,7 +580,7 @@ def _remove_backslash_line_continuation_chars(
|
||||
return Ok(new_line)
|
||||
|
||||
def _merge_string_group(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> TResult[Line]:
|
||||
"""
|
||||
Merges string groups (i.e. set of adjacent strings).
|
||||
@ -603,7 +599,7 @@ def _merge_string_group(
|
||||
is_valid_index = is_valid_index_factory(LL)
|
||||
|
||||
# A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
|
||||
merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {}
|
||||
merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
|
||||
for string_idx in string_indices:
|
||||
vresult = self._validate_msg(line, string_idx)
|
||||
if isinstance(vresult, Err):
|
||||
@ -639,8 +635,8 @@ def _merge_string_group(
|
||||
return Ok(new_line)
|
||||
|
||||
def _merge_one_string_group(
|
||||
self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
|
||||
) -> Tuple[int, Leaf]:
|
||||
self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
|
||||
) -> tuple[int, Leaf]:
|
||||
"""
|
||||
Merges one string group where the first string in the group is
|
||||
`LL[string_idx]`.
|
||||
@ -1004,11 +1000,11 @@ def do_match(self, line: Line) -> TMatchResult:
|
||||
return TErr("This line has no strings wrapped in parens.")
|
||||
|
||||
def do_transform(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> Iterator[TResult[Line]]:
|
||||
LL = line.leaves
|
||||
|
||||
string_and_rpar_indices: List[int] = []
|
||||
string_and_rpar_indices: list[int] = []
|
||||
for string_idx in string_indices:
|
||||
string_parser = StringParser()
|
||||
rpar_idx = string_parser.parse(LL, string_idx)
|
||||
@ -1031,7 +1027,7 @@ def do_transform(
|
||||
)
|
||||
|
||||
def _transform_to_new_line(
|
||||
self, line: Line, string_and_rpar_indices: List[int]
|
||||
self, line: Line, string_and_rpar_indices: list[int]
|
||||
) -> Line:
|
||||
LL = line.leaves
|
||||
|
||||
@ -1284,7 +1280,7 @@ def _get_max_string_length(self, line: Line, string_idx: int) -> int:
|
||||
return max_string_length
|
||||
|
||||
@staticmethod
|
||||
def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -1329,14 +1325,14 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
|
||||
def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
|
||||
def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
|
||||
"""
|
||||
Yields spans corresponding to expressions in a given f-string.
|
||||
Spans are half-open ranges (left inclusive, right exclusive).
|
||||
Assumes the input string is a valid f-string, but will not crash if the input
|
||||
string is invalid.
|
||||
"""
|
||||
stack: List[int] = [] # our curly paren stack
|
||||
stack: list[int] = [] # our curly paren stack
|
||||
i = 0
|
||||
while i < len(s):
|
||||
if s[i] == "{":
|
||||
@ -1499,7 +1495,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
|
||||
return Ok([string_idx])
|
||||
|
||||
def do_transform(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> Iterator[TResult[Line]]:
|
||||
LL = line.leaves
|
||||
assert len(string_indices) == 1, (
|
||||
@ -1601,7 +1597,7 @@ def more_splits_should_be_made() -> bool:
|
||||
else:
|
||||
return str_width(rest_value) > max_last_string_column()
|
||||
|
||||
string_line_results: List[Ok[Line]] = []
|
||||
string_line_results: list[Ok[Line]] = []
|
||||
while more_splits_should_be_made():
|
||||
if use_custom_breakpoints:
|
||||
# Custom User Split (manual)
|
||||
@ -1730,7 +1726,7 @@ def more_splits_should_be_made() -> bool:
|
||||
last_line.comments = line.comments.copy()
|
||||
yield Ok(last_line)
|
||||
|
||||
def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
|
||||
def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
|
||||
"""
|
||||
Yields:
|
||||
All ranges of @string which, if @string were to be split there,
|
||||
@ -1761,7 +1757,7 @@ def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
|
||||
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
|
||||
yield begin, end
|
||||
|
||||
def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
|
||||
def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
|
||||
"""
|
||||
Yields:
|
||||
All ranges of @string which, if @string were to be split there,
|
||||
@ -1772,8 +1768,8 @@ def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
|
||||
return
|
||||
yield from iter_fexpr_spans(string)
|
||||
|
||||
def _get_illegal_split_indices(self, string: str) -> Set[Index]:
|
||||
illegal_indices: Set[Index] = set()
|
||||
def _get_illegal_split_indices(self, string: str) -> set[Index]:
|
||||
illegal_indices: set[Index] = set()
|
||||
iterators = [
|
||||
self._iter_fexpr_slices(string),
|
||||
self._iter_nameescape_slices(string),
|
||||
@ -1899,7 +1895,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str:
|
||||
else:
|
||||
return string
|
||||
|
||||
def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
|
||||
def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
|
||||
LL = list(leaves)
|
||||
|
||||
string_op_leaves = []
|
||||
@ -2008,7 +2004,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
|
||||
return TErr("This line does not contain any non-atomic strings.")
|
||||
|
||||
@staticmethod
|
||||
def _return_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _return_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -2033,7 +2029,7 @@ def _return_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _else_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _else_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -2060,7 +2056,7 @@ def _else_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _assert_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _assert_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -2095,7 +2091,7 @@ def _assert_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _assign_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _assign_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -2142,7 +2138,7 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
|
||||
def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]:
|
||||
"""
|
||||
Returns:
|
||||
string_idx such that @LL[string_idx] is equal to our target (i.e.
|
||||
@ -2181,7 +2177,7 @@ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
|
||||
return None
|
||||
|
||||
def do_transform(
|
||||
self, line: Line, string_indices: List[int]
|
||||
self, line: Line, string_indices: list[int]
|
||||
) -> Iterator[TResult[Line]]:
|
||||
LL = line.leaves
|
||||
assert len(string_indices) == 1, (
|
||||
@ -2347,7 +2343,7 @@ class StringParser:
|
||||
DONE: Final = 8
|
||||
|
||||
# Lookup Table for Next State
|
||||
_goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
|
||||
_goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = {
|
||||
# A string trailer may start with '.' OR '%'.
|
||||
(START, token.DOT): DOT,
|
||||
(START, token.PERCENT): PERCENT,
|
||||
@ -2376,7 +2372,7 @@ def __init__(self) -> None:
|
||||
self._state = self.START
|
||||
self._unmatched_lpars = 0
|
||||
|
||||
def parse(self, leaves: List[Leaf], string_idx: int) -> int:
|
||||
def parse(self, leaves: list[Leaf], string_idx: int) -> int:
|
||||
"""
|
||||
Pre-conditions:
|
||||
* @leaves[@string_idx].type == token.STRING
|
||||
|
@ -4,7 +4,6 @@
|
||||
from datetime import datetime, timezone
|
||||
from functools import partial
|
||||
from multiprocessing import freeze_support
|
||||
from typing import Set, Tuple
|
||||
|
||||
try:
|
||||
from aiohttp import web
|
||||
@ -191,7 +190,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:
|
||||
|
||||
preview = bool(headers.get(PREVIEW, False))
|
||||
unstable = bool(headers.get(UNSTABLE, False))
|
||||
enable_features: Set[black.Preview] = set()
|
||||
enable_features: set[black.Preview] = set()
|
||||
enable_unstable_features = headers.get(ENABLE_UNSTABLE_FEATURE, "").split(",")
|
||||
for piece in enable_unstable_features:
|
||||
piece = piece.strip()
|
||||
@ -216,7 +215,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:
|
||||
)
|
||||
|
||||
|
||||
def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
|
||||
def parse_python_variant_header(value: str) -> tuple[bool, set[black.TargetVersion]]:
|
||||
if value == "pyi":
|
||||
return True, set()
|
||||
else:
|
||||
|
@ -24,7 +24,7 @@
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from logging import Logger
|
||||
from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
|
||||
from typing import IO, Any, Iterable, Iterator, Optional, Union, cast
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
||||
@ -40,7 +40,7 @@
|
||||
class ReleaseRange:
|
||||
start: int
|
||||
end: Optional[int] = None
|
||||
tokens: List[Any] = field(default_factory=list)
|
||||
tokens: list[Any] = field(default_factory=list)
|
||||
|
||||
def lock(self) -> None:
|
||||
total_eaten = len(self.tokens)
|
||||
@ -51,7 +51,7 @@ class TokenProxy:
|
||||
def __init__(self, generator: Any) -> None:
|
||||
self._tokens = generator
|
||||
self._counter = 0
|
||||
self._release_ranges: List[ReleaseRange] = []
|
||||
self._release_ranges: list[ReleaseRange] = []
|
||||
|
||||
@contextmanager
|
||||
def release(self) -> Iterator["TokenProxy"]:
|
||||
@ -121,7 +121,7 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->
|
||||
|
||||
lineno = 1
|
||||
column = 0
|
||||
indent_columns: List[int] = []
|
||||
indent_columns: list[int] = []
|
||||
type = value = start = end = line_text = None
|
||||
prefix = ""
|
||||
|
||||
@ -202,8 +202,8 @@ def parse_string(self, text: str, debug: bool = False) -> NL:
|
||||
)
|
||||
return self.parse_tokens(tokens, debug)
|
||||
|
||||
def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
|
||||
lines: List[str] = []
|
||||
def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
|
||||
lines: list[str] = []
|
||||
current_line = ""
|
||||
current_column = 0
|
||||
wait_for_nl = False
|
||||
|
@ -16,15 +16,15 @@
|
||||
import os
|
||||
import pickle
|
||||
import tempfile
|
||||
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
|
||||
from typing import Any, Optional, TypeVar, Union
|
||||
|
||||
# Local imports
|
||||
from . import token
|
||||
|
||||
_P = TypeVar("_P", bound="Grammar")
|
||||
Label = Tuple[int, Optional[str]]
|
||||
DFA = List[List[Tuple[int, int]]]
|
||||
DFAS = Tuple[DFA, Dict[int, int]]
|
||||
Label = tuple[int, Optional[str]]
|
||||
DFA = list[list[tuple[int, int]]]
|
||||
DFAS = tuple[DFA, dict[int, int]]
|
||||
Path = Union[str, "os.PathLike[str]"]
|
||||
|
||||
|
||||
@ -83,16 +83,16 @@ class Grammar:
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.symbol2number: Dict[str, int] = {}
|
||||
self.number2symbol: Dict[int, str] = {}
|
||||
self.states: List[DFA] = []
|
||||
self.dfas: Dict[int, DFAS] = {}
|
||||
self.labels: List[Label] = [(0, "EMPTY")]
|
||||
self.keywords: Dict[str, int] = {}
|
||||
self.soft_keywords: Dict[str, int] = {}
|
||||
self.tokens: Dict[int, int] = {}
|
||||
self.symbol2label: Dict[str, int] = {}
|
||||
self.version: Tuple[int, int] = (0, 0)
|
||||
self.symbol2number: dict[str, int] = {}
|
||||
self.number2symbol: dict[int, str] = {}
|
||||
self.states: list[DFA] = []
|
||||
self.dfas: dict[int, DFAS] = {}
|
||||
self.labels: list[Label] = [(0, "EMPTY")]
|
||||
self.keywords: dict[str, int] = {}
|
||||
self.soft_keywords: dict[str, int] = {}
|
||||
self.tokens: dict[int, int] = {}
|
||||
self.symbol2label: dict[str, int] = {}
|
||||
self.version: tuple[int, int] = (0, 0)
|
||||
self.start = 256
|
||||
# Python 3.7+ parses async as a keyword, not an identifier
|
||||
self.async_keywords = False
|
||||
@ -114,7 +114,7 @@ def dump(self, filename: Path) -> None:
|
||||
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
|
||||
os.replace(f.name, filename)
|
||||
|
||||
def _update(self, attrs: Dict[str, Any]) -> None:
|
||||
def _update(self, attrs: dict[str, Any]) -> None:
|
||||
for k, v in attrs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
|
@ -4,9 +4,9 @@
|
||||
"""Safely evaluate Python string literals without using eval()."""
|
||||
|
||||
import re
|
||||
from typing import Dict, Match
|
||||
from typing import Match
|
||||
|
||||
simple_escapes: Dict[str, str] = {
|
||||
simple_escapes: dict[str, str] = {
|
||||
"a": "\a",
|
||||
"b": "\b",
|
||||
"f": "\f",
|
||||
|
@ -10,19 +10,7 @@
|
||||
|
||||
"""
|
||||
from contextlib import contextmanager
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Union, cast
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
|
||||
@ -34,10 +22,10 @@
|
||||
from blib2to3.pgen2.driver import TokenProxy
|
||||
|
||||
|
||||
Results = Dict[str, NL]
|
||||
Results = dict[str, NL]
|
||||
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
|
||||
DFA = List[List[Tuple[int, int]]]
|
||||
DFAS = Tuple[DFA, Dict[int, int]]
|
||||
DFA = list[list[tuple[int, int]]]
|
||||
DFAS = tuple[DFA, dict[int, int]]
|
||||
|
||||
|
||||
def lam_sub(grammar: Grammar, node: RawNode) -> NL:
|
||||
@ -50,24 +38,24 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:
|
||||
|
||||
|
||||
def stack_copy(
|
||||
stack: List[Tuple[DFAS, int, RawNode]],
|
||||
) -> List[Tuple[DFAS, int, RawNode]]:
|
||||
stack: list[tuple[DFAS, int, RawNode]],
|
||||
) -> list[tuple[DFAS, int, RawNode]]:
|
||||
"""Nodeless stack copy."""
|
||||
return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
|
||||
|
||||
|
||||
class Recorder:
|
||||
def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
|
||||
def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
|
||||
self.parser = parser
|
||||
self._ilabels = ilabels
|
||||
self.context = context # not really matter
|
||||
|
||||
self._dead_ilabels: Set[int] = set()
|
||||
self._dead_ilabels: set[int] = set()
|
||||
self._start_point = self.parser.stack
|
||||
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
|
||||
|
||||
@property
|
||||
def ilabels(self) -> Set[int]:
|
||||
def ilabels(self) -> set[int]:
|
||||
return self._dead_ilabels.symmetric_difference(self._ilabels)
|
||||
|
||||
@contextmanager
|
||||
@ -233,9 +221,9 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
|
||||
# where children is a list of nodes or None, and context may be None.
|
||||
newnode: RawNode = (start, None, None, [])
|
||||
stackentry = (self.grammar.dfas[start], 0, newnode)
|
||||
self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
|
||||
self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
|
||||
self.rootnode: Optional[NL] = None
|
||||
self.used_names: Set[str] = set()
|
||||
self.used_names: set[str] = set()
|
||||
self.proxy = proxy
|
||||
self.last_token = None
|
||||
|
||||
@ -333,7 +321,7 @@ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> boo
|
||||
# No success finding a transition
|
||||
raise ParseError("bad input", type, value, context)
|
||||
|
||||
def classify(self, type: int, value: str, context: Context) -> List[int]:
|
||||
def classify(self, type: int, value: str, context: Context) -> list[int]:
|
||||
"""Turn a token into a label. (Internal)
|
||||
|
||||
Depending on whether the value is a soft-keyword or not,
|
||||
|
@ -2,18 +2,7 @@
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
import os
|
||||
from typing import (
|
||||
IO,
|
||||
Any,
|
||||
Dict,
|
||||
Iterator,
|
||||
List,
|
||||
NoReturn,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing import IO, Any, Iterator, NoReturn, Optional, Sequence, Union
|
||||
|
||||
from blib2to3.pgen2 import grammar, token, tokenize
|
||||
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
||||
@ -29,7 +18,7 @@ class ParserGenerator:
|
||||
filename: Path
|
||||
stream: IO[str]
|
||||
generator: Iterator[GoodTokenInfo]
|
||||
first: Dict[str, Optional[Dict[str, int]]]
|
||||
first: dict[str, Optional[dict[str, int]]]
|
||||
|
||||
def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
|
||||
close_stream = None
|
||||
@ -71,7 +60,7 @@ def make_grammar(self) -> PgenGrammar:
|
||||
c.start = c.symbol2number[self.startsymbol]
|
||||
return c
|
||||
|
||||
def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
|
||||
def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]:
|
||||
rawfirst = self.first[name]
|
||||
assert rawfirst is not None
|
||||
first = {}
|
||||
@ -144,7 +133,7 @@ def calcfirst(self, name: str) -> None:
|
||||
dfa = self.dfas[name]
|
||||
self.first[name] = None # dummy to detect left recursion
|
||||
state = dfa[0]
|
||||
totalset: Dict[str, int] = {}
|
||||
totalset: dict[str, int] = {}
|
||||
overlapcheck = {}
|
||||
for label in state.arcs:
|
||||
if label in self.dfas:
|
||||
@ -161,7 +150,7 @@ def calcfirst(self, name: str) -> None:
|
||||
else:
|
||||
totalset[label] = 1
|
||||
overlapcheck[label] = {label: 1}
|
||||
inverse: Dict[str, str] = {}
|
||||
inverse: dict[str, str] = {}
|
||||
for label, itsfirst in overlapcheck.items():
|
||||
for symbol in itsfirst:
|
||||
if symbol in inverse:
|
||||
@ -172,7 +161,7 @@ def calcfirst(self, name: str) -> None:
|
||||
inverse[symbol] = label
|
||||
self.first[name] = totalset
|
||||
|
||||
def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
|
||||
def parse(self) -> tuple[dict[str, list["DFAState"]], str]:
|
||||
dfas = {}
|
||||
startsymbol: Optional[str] = None
|
||||
# MSTART: (NEWLINE | RULE)* ENDMARKER
|
||||
@ -197,7 +186,7 @@ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
|
||||
assert startsymbol is not None
|
||||
return dfas, startsymbol
|
||||
|
||||
def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
|
||||
def make_dfa(self, start: "NFAState", finish: "NFAState") -> list["DFAState"]:
|
||||
# To turn an NFA into a DFA, we define the states of the DFA
|
||||
# to correspond to *sets* of states of the NFA. Then do some
|
||||
# state reduction. Let's represent sets as dicts with 1 for
|
||||
@ -205,12 +194,12 @@ def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
|
||||
assert isinstance(start, NFAState)
|
||||
assert isinstance(finish, NFAState)
|
||||
|
||||
def closure(state: NFAState) -> Dict[NFAState, int]:
|
||||
base: Dict[NFAState, int] = {}
|
||||
def closure(state: NFAState) -> dict[NFAState, int]:
|
||||
base: dict[NFAState, int] = {}
|
||||
addclosure(state, base)
|
||||
return base
|
||||
|
||||
def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
|
||||
def addclosure(state: NFAState, base: dict[NFAState, int]) -> None:
|
||||
assert isinstance(state, NFAState)
|
||||
if state in base:
|
||||
return
|
||||
@ -221,7 +210,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
|
||||
|
||||
states = [DFAState(closure(start), finish)]
|
||||
for state in states: # NB states grows while we're iterating
|
||||
arcs: Dict[str, Dict[NFAState, int]] = {}
|
||||
arcs: dict[str, dict[NFAState, int]] = {}
|
||||
for nfastate in state.nfaset:
|
||||
for label, next in nfastate.arcs:
|
||||
if label is not None:
|
||||
@ -259,7 +248,7 @@ def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
|
||||
for label, next in sorted(state.arcs.items()):
|
||||
print(" %s -> %d" % (label, dfa.index(next)))
|
||||
|
||||
def simplify_dfa(self, dfa: List["DFAState"]) -> None:
|
||||
def simplify_dfa(self, dfa: list["DFAState"]) -> None:
|
||||
# This is not theoretically optimal, but works well enough.
|
||||
# Algorithm: repeatedly look for two states that have the same
|
||||
# set of arcs (same labels pointing to the same nodes) and
|
||||
@ -280,7 +269,7 @@ def simplify_dfa(self, dfa: List["DFAState"]) -> None:
|
||||
changes = True
|
||||
break
|
||||
|
||||
def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
|
||||
def parse_rhs(self) -> tuple["NFAState", "NFAState"]:
|
||||
# RHS: ALT ('|' ALT)*
|
||||
a, z = self.parse_alt()
|
||||
if self.value != "|":
|
||||
@ -297,7 +286,7 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
|
||||
z.addarc(zz)
|
||||
return aa, zz
|
||||
|
||||
def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
|
||||
def parse_alt(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ALT: ITEM+
|
||||
a, b = self.parse_item()
|
||||
while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
|
||||
@ -306,7 +295,7 @@ def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
|
||||
b = d
|
||||
return a, b
|
||||
|
||||
def parse_item(self) -> Tuple["NFAState", "NFAState"]:
|
||||
def parse_item(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
|
||||
if self.value == "[":
|
||||
self.gettoken()
|
||||
@ -326,7 +315,7 @@ def parse_item(self) -> Tuple["NFAState", "NFAState"]:
|
||||
else:
|
||||
return a, a
|
||||
|
||||
def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
|
||||
def parse_atom(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ATOM: '(' RHS ')' | NAME | STRING
|
||||
if self.value == "(":
|
||||
self.gettoken()
|
||||
@ -371,7 +360,7 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn:
|
||||
|
||||
|
||||
class NFAState:
|
||||
arcs: List[Tuple[Optional[str], "NFAState"]]
|
||||
arcs: list[tuple[Optional[str], "NFAState"]]
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.arcs = [] # list of (label, NFAState) pairs
|
||||
@ -383,11 +372,11 @@ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
|
||||
|
||||
|
||||
class DFAState:
|
||||
nfaset: Dict[NFAState, Any]
|
||||
nfaset: dict[NFAState, Any]
|
||||
isfinal: bool
|
||||
arcs: Dict[str, "DFAState"]
|
||||
arcs: dict[str, "DFAState"]
|
||||
|
||||
def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
|
||||
def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None:
|
||||
assert isinstance(nfaset, dict)
|
||||
assert isinstance(next(iter(nfaset)), NFAState)
|
||||
assert isinstance(final, NFAState)
|
||||
|
@ -1,6 +1,6 @@
|
||||
"""Token constants (from "token.h")."""
|
||||
|
||||
from typing import Dict, Final
|
||||
from typing import Final
|
||||
|
||||
# Taken from Python (r53757) and modified to include some tokens
|
||||
# originally monkeypatched in by pgen2.tokenize
|
||||
@ -74,7 +74,7 @@
|
||||
NT_OFFSET: Final = 256
|
||||
# --end constants--
|
||||
|
||||
tok_name: Final[Dict[int, str]] = {}
|
||||
tok_name: Final[dict[int, str]] = {}
|
||||
for _name, _value in list(globals().items()):
|
||||
if type(_value) is int:
|
||||
tok_name[_value] = _name
|
||||
|
@ -29,18 +29,7 @@
|
||||
|
||||
import builtins
|
||||
import sys
|
||||
from typing import (
|
||||
Callable,
|
||||
Final,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Pattern,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing import Callable, Final, Iterable, Iterator, Optional, Pattern, Union
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pgen2.token import (
|
||||
@ -93,7 +82,7 @@ def maybe(*choices: str) -> str:
|
||||
return group(*choices) + "?"
|
||||
|
||||
|
||||
def _combinations(*l: str) -> Set[str]:
|
||||
def _combinations(*l: str) -> set[str]:
|
||||
return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
|
||||
|
||||
|
||||
@ -248,7 +237,7 @@ class StopTokenizing(Exception):
|
||||
pass
|
||||
|
||||
|
||||
Coord = Tuple[int, int]
|
||||
Coord = tuple[int, int]
|
||||
|
||||
|
||||
def printtoken(
|
||||
@ -289,12 +278,12 @@ def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
|
||||
tokeneater(*token_info)
|
||||
|
||||
|
||||
GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
|
||||
TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
|
||||
GoodTokenInfo = tuple[int, str, Coord, Coord, str]
|
||||
TokenInfo = Union[tuple[int, str], GoodTokenInfo]
|
||||
|
||||
|
||||
class Untokenizer:
|
||||
tokens: List[str]
|
||||
tokens: list[str]
|
||||
prev_row: int
|
||||
prev_col: int
|
||||
|
||||
@ -324,7 +313,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
|
||||
self.prev_col = 0
|
||||
return "".join(self.tokens)
|
||||
|
||||
def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
|
||||
def compat(self, token: tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
|
||||
startline = False
|
||||
indents = []
|
||||
toks_append = self.tokens.append
|
||||
@ -370,7 +359,7 @@ def _get_normal_name(orig_enc: str) -> str:
|
||||
return orig_enc
|
||||
|
||||
|
||||
def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
|
||||
def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, list[bytes]]:
|
||||
"""
|
||||
The detect_encoding() function is used to detect the encoding that should
|
||||
be used to decode a Python source file. It requires one argument, readline,
|
||||
@ -471,7 +460,7 @@ def is_fstring_start(token: str) -> bool:
|
||||
return builtins.any(token.startswith(prefix) for prefix in fstring_prefix)
|
||||
|
||||
|
||||
def _split_fstring_start_and_middle(token: str) -> Tuple[str, str]:
|
||||
def _split_fstring_start_and_middle(token: str) -> tuple[str, str]:
|
||||
for prefix in fstring_prefix:
|
||||
_, prefix, rest = token.partition(prefix)
|
||||
if prefix != "":
|
||||
@ -525,7 +514,7 @@ class FStringState:
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.stack: List[int] = [STATE_NOT_FSTRING]
|
||||
self.stack: list[int] = [STATE_NOT_FSTRING]
|
||||
|
||||
def is_in_fstring_expression(self) -> bool:
|
||||
return self.stack[-1] not in (STATE_MIDDLE, STATE_NOT_FSTRING)
|
||||
@ -581,7 +570,7 @@ def generate_tokens(
|
||||
logical line; continuation lines are included.
|
||||
"""
|
||||
lnum = parenlev = continued = 0
|
||||
parenlev_stack: List[int] = []
|
||||
parenlev_stack: list[int] = []
|
||||
fstring_state = FStringState()
|
||||
formatspec = ""
|
||||
numchars: Final[str] = "0123456789"
|
||||
@ -598,9 +587,9 @@ def generate_tokens(
|
||||
async_def_indent = 0
|
||||
async_def_nl = False
|
||||
|
||||
strstart: Tuple[int, int]
|
||||
endprog_stack: List[Pattern[str]] = []
|
||||
formatspec_start: Tuple[int, int]
|
||||
strstart: tuple[int, int]
|
||||
endprog_stack: list[Pattern[str]] = []
|
||||
formatspec_start: tuple[int, int]
|
||||
|
||||
while 1: # loop over lines in stream
|
||||
try:
|
||||
|
@ -12,18 +12,7 @@
|
||||
|
||||
# mypy: allow-untyped-defs, allow-incomplete-defs
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
from typing import Any, Iterable, Iterator, Optional, TypeVar, Union
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
|
||||
@ -34,7 +23,7 @@
|
||||
|
||||
HUGE: int = 0x7FFFFFFF # maximum repeat count, default max
|
||||
|
||||
_type_reprs: Dict[int, Union[str, int]] = {}
|
||||
_type_reprs: dict[int, Union[str, int]] = {}
|
||||
|
||||
|
||||
def type_repr(type_num: int) -> Union[str, int]:
|
||||
@ -57,8 +46,8 @@ def type_repr(type_num: int) -> Union[str, int]:
|
||||
_P = TypeVar("_P", bound="Base")
|
||||
|
||||
NL = Union["Node", "Leaf"]
|
||||
Context = Tuple[str, Tuple[int, int]]
|
||||
RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
|
||||
Context = tuple[str, tuple[int, int]]
|
||||
RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]
|
||||
|
||||
|
||||
class Base:
|
||||
@ -74,7 +63,7 @@ class Base:
|
||||
# Default values for instance variables
|
||||
type: int # int: token number (< 256) or symbol number (>= 256)
|
||||
parent: Optional["Node"] = None # Parent node pointer, or None
|
||||
children: List[NL] # List of subnodes
|
||||
children: list[NL] # List of subnodes
|
||||
was_changed: bool = False
|
||||
was_checked: bool = False
|
||||
|
||||
@ -135,7 +124,7 @@ def pre_order(self) -> Iterator[NL]:
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def replace(self, new: Union[NL, List[NL]]) -> None:
|
||||
def replace(self, new: Union[NL, list[NL]]) -> None:
|
||||
"""Replace this node with a new one in the parent."""
|
||||
assert self.parent is not None, str(self)
|
||||
assert new is not None
|
||||
@ -242,16 +231,16 @@ def get_suffix(self) -> str:
|
||||
class Node(Base):
|
||||
"""Concrete implementation for interior nodes."""
|
||||
|
||||
fixers_applied: Optional[List[Any]]
|
||||
used_names: Optional[Set[str]]
|
||||
fixers_applied: Optional[list[Any]]
|
||||
used_names: Optional[set[str]]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
type: int,
|
||||
children: List[NL],
|
||||
children: list[NL],
|
||||
context: Optional[Any] = None,
|
||||
prefix: Optional[str] = None,
|
||||
fixers_applied: Optional[List[Any]] = None,
|
||||
fixers_applied: Optional[list[Any]] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Initializer.
|
||||
@ -363,12 +352,12 @@ def append_child(self, child: NL) -> None:
|
||||
self.invalidate_sibling_maps()
|
||||
|
||||
def invalidate_sibling_maps(self) -> None:
|
||||
self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
|
||||
self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
|
||||
self.prev_sibling_map: Optional[dict[int, Optional[NL]]] = None
|
||||
self.next_sibling_map: Optional[dict[int, Optional[NL]]] = None
|
||||
|
||||
def update_sibling_maps(self) -> None:
|
||||
_prev: Dict[int, Optional[NL]] = {}
|
||||
_next: Dict[int, Optional[NL]] = {}
|
||||
_prev: dict[int, Optional[NL]] = {}
|
||||
_next: dict[int, Optional[NL]] = {}
|
||||
self.prev_sibling_map = _prev
|
||||
self.next_sibling_map = _next
|
||||
previous: Optional[NL] = None
|
||||
@ -384,11 +373,11 @@ class Leaf(Base):
|
||||
|
||||
# Default values for instance variables
|
||||
value: str
|
||||
fixers_applied: List[Any]
|
||||
fixers_applied: list[Any]
|
||||
bracket_depth: int
|
||||
# Changed later in brackets.py
|
||||
opening_bracket: Optional["Leaf"] = None
|
||||
used_names: Optional[Set[str]]
|
||||
used_names: Optional[set[str]]
|
||||
_prefix = "" # Whitespace and comments preceding this token in the input
|
||||
lineno: int = 0 # Line where this token starts in the input
|
||||
column: int = 0 # Column where this token starts in the input
|
||||
@ -403,7 +392,7 @@ def __init__(
|
||||
value: str,
|
||||
context: Optional[Context] = None,
|
||||
prefix: Optional[str] = None,
|
||||
fixers_applied: List[Any] = [],
|
||||
fixers_applied: list[Any] = [],
|
||||
opening_bracket: Optional["Leaf"] = None,
|
||||
fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
|
||||
) -> None:
|
||||
@ -421,7 +410,7 @@ def __init__(
|
||||
self.value = value
|
||||
if prefix is not None:
|
||||
self._prefix = prefix
|
||||
self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
|
||||
self.fixers_applied: Optional[list[Any]] = fixers_applied[:]
|
||||
self.children = []
|
||||
self.opening_bracket = opening_bracket
|
||||
self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
|
||||
@ -503,7 +492,7 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
|
||||
return Leaf(type, value or "", context=context)
|
||||
|
||||
|
||||
_Results = Dict[str, NL]
|
||||
_Results = dict[str, NL]
|
||||
|
||||
|
||||
class BasePattern:
|
||||
@ -576,7 +565,7 @@ def match(self, node: NL, results: Optional[_Results] = None) -> bool:
|
||||
results[self.name] = node
|
||||
return True
|
||||
|
||||
def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
|
||||
def match_seq(self, nodes: list[NL], results: Optional[_Results] = None) -> bool:
|
||||
"""
|
||||
Does this pattern exactly match a sequence of nodes?
|
||||
|
||||
@ -586,7 +575,7 @@ def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool
|
||||
return False
|
||||
return self.match(nodes[0], results)
|
||||
|
||||
def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
|
||||
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
|
||||
"""
|
||||
Generator yielding all matches for this pattern.
|
||||
|
||||
@ -816,7 +805,7 @@ def match_seq(self, nodes, results=None) -> bool:
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
|
||||
def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
|
||||
"""
|
||||
Generator yielding matches for a sequence of nodes.
|
||||
|
||||
@ -861,7 +850,7 @@ def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
|
||||
if hasattr(sys, "getrefcount"):
|
||||
sys.stderr = save_stderr
|
||||
|
||||
def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
|
||||
def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
|
||||
"""Helper to iteratively yield the matches."""
|
||||
nodelen = len(nodes)
|
||||
if 0 >= self.min:
|
||||
@ -890,7 +879,7 @@ def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
|
||||
new_results.append((c0 + c1, r))
|
||||
results = new_results
|
||||
|
||||
def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
|
||||
def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
|
||||
"""Special optimized matcher for bare_name."""
|
||||
count = 0
|
||||
r = {} # type: _Results
|
||||
@ -907,7 +896,7 @@ def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
|
||||
r[self.name] = nodes[:count]
|
||||
return count, r
|
||||
|
||||
def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
|
||||
def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
|
||||
"""Helper to recursively yield the matches."""
|
||||
assert self.content is not None
|
||||
if count >= self.min:
|
||||
@ -944,7 +933,7 @@ def match_seq(self, nodes, results=None) -> bool:
|
||||
# We only match an empty sequence of nodes in its entirety
|
||||
return len(nodes) == 0
|
||||
|
||||
def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
|
||||
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
|
||||
if self.content is None:
|
||||
# Return a match if there is an empty sequence
|
||||
if len(nodes) == 0:
|
||||
@ -957,8 +946,8 @@ def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
|
||||
|
||||
|
||||
def generate_matches(
|
||||
patterns: List[BasePattern], nodes: List[NL]
|
||||
) -> Iterator[Tuple[int, _Results]]:
|
||||
patterns: list[BasePattern], nodes: list[NL]
|
||||
) -> Iterator[tuple[int, _Results]]:
|
||||
"""
|
||||
Generator yielding matches for a sequence of patterns and nodes.
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user