Run pyupgrade on blib2to3 and src (#3771)

parent 114e8357e6
commit 0b4d7d55f7
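The hunks below are mechanical rewrites of the kind pyupgrade performs on Python 3.8+ code: dropping the empty parentheses on @lru_cache, removing the redundant (object) base class, replacing typing.Text annotations with str, turning %-formatting into f-strings, preferring set literals/comprehensions, and calling open() instead of io.open(). A minimal illustrative sketch of those patterns (the names below are made up for illustration, not taken from the changed files):

    from functools import lru_cache


    @lru_cache  # Python 3.8+: no empty parentheses needed on the decorator
    def describe(value: str) -> str:  # typing.Text parameters become plain str
        return f"value={value!r}"  # %-formatting becomes an f-string


    class Registry:  # was "class Registry(object):"; the (object) base is redundant on Python 3
        names = {n.lower() for n in ("A", "B")}  # set(...) call becomes a set comprehension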
@@ -42,7 +42,7 @@
 import colorama # noqa: F401


-@lru_cache()
+@lru_cache
 def find_project_root(
 srcs: Sequence[str], stdin_filename: Optional[str] = None
 ) -> Tuple[Path, str]:

@@ -212,7 +212,7 @@ def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
 return SpecifierSet(",".join(str(s) for s in specifiers))


-@lru_cache()
+@lru_cache
 def find_user_pyproject_toml() -> Path:
 r"""Return the path to the top-level user configuration for black.


@@ -232,7 +232,7 @@ def find_user_pyproject_toml() -> Path:
 return user_config_path.resolve()


-@lru_cache()
+@lru_cache
 def get_gitignore(root: Path) -> PathSpec:
 """Return a PathSpec matching gitignore content if present."""
 gitignore = root / ".gitignore"
@@ -55,7 +55,7 @@ class Replacement:
 src: str


-@lru_cache()
+@lru_cache
 def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
 try:
 # isort: off
@@ -63,7 +63,7 @@ def parse_graminit_h(self, filename):
 try:
 f = open(filename)
 except OSError as err:
-print("Can't open %s: %s" % (filename, err))
+print(f"Can't open {filename}: {err}")
 return False
 self.symbol2number = {}
 self.number2symbol = {}

@@ -72,7 +72,7 @@ def parse_graminit_h(self, filename):
 lineno += 1
 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
 if not mo and line.strip():
-print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
+print(f"{filename}({lineno}): can't parse {line.strip()}")
 else:
 symbol, number = mo.groups()
 number = int(number)

@@ -113,7 +113,7 @@ def parse_graminit_c(self, filename):
 try:
 f = open(filename)
 except OSError as err:
-print("Can't open %s: %s" % (filename, err))
+print(f"Can't open {filename}: {err}")
 return False
 # The code below essentially uses f's iterator-ness!
 lineno = 0
@@ -28,11 +28,8 @@
 Iterable,
 List,
 Optional,
-Text,
 Iterator,
 Tuple,
 TypeVar,
 Generic,
 Union,
 )
 from contextlib import contextmanager
@@ -116,7 +113,7 @@ def can_advance(self, to: int) -> bool:
 return True


-class Driver(object):
+class Driver:
 def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
 self.grammar = grammar
 if logger is None:

@@ -189,30 +186,30 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->
 assert p.rootnode is not None
 return p.rootnode

-def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
 """Parse a stream and return the syntax tree."""
 tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
 return self.parse_tokens(tokens, debug)

-def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
 """Parse a stream and return the syntax tree."""
 return self.parse_stream_raw(stream, debug)

 def parse_file(
-self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+self, filename: Path, encoding: Optional[str] = None, debug: bool = False
 ) -> NL:
 """Parse a file and return the syntax tree."""
-with io.open(filename, "r", encoding=encoding) as stream:
+with open(filename, encoding=encoding) as stream:
 return self.parse_stream(stream, debug)

-def parse_string(self, text: Text, debug: bool = False) -> NL:
+def parse_string(self, text: str, debug: bool = False) -> NL:
 """Parse a string and return the syntax tree."""
 tokens = tokenize.generate_tokens(
 io.StringIO(text).readline, grammar=self.grammar
 )
 return self.parse_tokens(tokens, debug)

-def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
 lines: List[str] = []
 current_line = ""
 current_column = 0
@@ -240,7 +237,7 @@ def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Te
 return "".join(lines), current_line


-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
 head, tail = os.path.splitext(gt)
 if tail == ".txt":
 tail = ""

@@ -252,8 +249,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:


 def load_grammar(
-gt: Text = "Grammar.txt",
-gp: Optional[Text] = None,
+gt: str = "Grammar.txt",
+gp: Optional[str] = None,
 save: bool = True,
 force: bool = False,
 logger: Optional[Logger] = None,

@@ -276,7 +273,7 @@ def load_grammar(
 return g


-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
 """Inquire whether file a was written since file b."""
 if not os.path.exists(a):
 return False

@@ -286,7 +283,7 @@ def _newer(a: Text, b: Text) -> bool:


 def load_packaged_grammar(
-package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+package: str, grammar_source: str, cache_dir: Optional[Path] = None
 ) -> grammar.Grammar:
 """Normally, loads a pickled grammar by doing
 pkgutil.get_data(package, pickled_grammar)

@@ -309,7 +306,7 @@ def load_packaged_grammar(
 return g


-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
 """Main program, when run as a script: produce grammar pickle files.

 Calls load_grammar for each argument, a path to a grammar text file.
@@ -16,19 +16,19 @@
 import os
 import pickle
 import tempfile
-from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union

 # Local imports
 from . import token

 _P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[Text]]
+Label = Tuple[int, Optional[str]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
 Path = Union[str, "os.PathLike[str]"]


-class Grammar(object):
+class Grammar:
 """Pgen parsing tables conversion class.

 Once initialized, this class supplies the grammar tables for the
@@ -5,10 +5,10 @@

 import re

-from typing import Dict, Match, Text
+from typing import Dict, Match


-simple_escapes: Dict[Text, Text] = {
+simple_escapes: Dict[str, str] = {
 "a": "\a",
 "b": "\b",
 "f": "\f",

@@ -22,7 +22,7 @@
 }


-def escape(m: Match[Text]) -> Text:
+def escape(m: Match[str]) -> str:
 all, tail = m.group(0, 1)
 assert all.startswith("\\")
 esc = simple_escapes.get(tail)

@@ -44,7 +44,7 @@ def escape(m: Match[Text]) -> Text:
 return chr(i)


-def evalString(s: Text) -> Text:
+def evalString(s: str) -> str:
 assert s.startswith("'") or s.startswith('"'), repr(s[:1])
 q = s[0]
 if s[:3] == q * 3:
@@ -9,7 +9,6 @@
 how this parsing engine works.

 """
 import copy
 from contextlib import contextmanager

 # Local imports
@@ -18,7 +17,6 @@
 cast,
 Any,
 Optional,
-Text,
 Union,
 Tuple,
 Dict,

@@ -35,7 +33,7 @@
 from blib2to3.pgen2.driver import TokenProxy


-Results = Dict[Text, NL]
+Results = Dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]

@@ -100,7 +98,7 @@ def backtrack(self) -> Iterator[None]:
 finally:
 self.parser.is_backtracking = is_backtracking

-def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
 func: Callable[..., Any]
 if raw:
 func = self.parser._addtoken

@@ -114,7 +112,7 @@ def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
 args.insert(0, ilabel)
 func(*args)

-def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
+def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
 alive_ilabels = self.ilabels
 if len(alive_ilabels) == 0:
 *_, most_successful_ilabel = self._dead_ilabels

@@ -131,10 +129,10 @@ class ParseError(Exception):
 """Exception to signal the parser is stuck."""

 def __init__(
-self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+self, msg: str, type: Optional[int], value: Optional[str], context: Context
 ) -> None:
 Exception.__init__(
-self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
 )
 self.msg = msg
 self.type = type

@@ -142,7 +140,7 @@ def __init__(
 self.context = context


-class Parser(object):
+class Parser:
 """Parser engine.

 The proper usage sequence is:

@@ -236,7 +234,7 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
 self.used_names: Set[str] = set()
 self.proxy = proxy

-def addtoken(self, type: int, value: Text, context: Context) -> bool:
+def addtoken(self, type: int, value: str, context: Context) -> bool:
 """Add a token; return True iff this is the end of the program."""
 # Map from token to label
 ilabels = self.classify(type, value, context)

@@ -284,7 +282,7 @@ def addtoken(self, type: int, value: Text, context: Context) -> bool:

 return self._addtoken(ilabel, type, value, context)

-def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
 # Loop until the token is shifted; may raise exceptions
 while True:
 dfa, state, node = self.stack[-1]

@@ -329,7 +327,7 @@ def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bo
 # No success finding a transition
 raise ParseError("bad input", type, value, context)

-def classify(self, type: int, value: Text, context: Context) -> List[int]:
+def classify(self, type: int, value: str, context: Context) -> List[int]:
 """Turn a token into a label. (Internal)

 Depending on whether the value is a soft-keyword or not,

@@ -352,7 +350,7 @@ def classify(self, type: int, value: Text, context: Context) -> List[int]:
 raise ParseError("bad token", type, value, context)
 return [ilabel]

-def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
 """Shift a token. (Internal)"""
 if self.is_backtracking:
 dfa, state, _ = self.stack[-1]
@@ -11,7 +11,6 @@
 Iterator,
 List,
 Optional,
-Text,
 Tuple,
 Union,
 Sequence,
@@ -29,13 +28,13 @@ class PgenGrammar(grammar.Grammar):
 pass


-class ParserGenerator(object):
+class ParserGenerator:
 filename: Path
-stream: IO[Text]
+stream: IO[str]
 generator: Iterator[GoodTokenInfo]
-first: Dict[Text, Optional[Dict[Text, int]]]
+first: Dict[str, Optional[Dict[str, int]]]

-def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None:
+def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
 close_stream = None
 if stream is None:
 stream = open(filename, encoding="utf-8")

@@ -75,7 +74,7 @@ def make_grammar(self) -> PgenGrammar:
 c.start = c.symbol2number[self.startsymbol]
 return c

-def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
+def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
 rawfirst = self.first[name]
 assert rawfirst is not None
 first = {}

@@ -85,7 +84,7 @@ def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
 first[ilabel] = 1
 return first

-def make_label(self, c: PgenGrammar, label: Text) -> int:
+def make_label(self, c: PgenGrammar, label: str) -> int:
 # XXX Maybe this should be a method on a subclass of converter?
 ilabel = len(c.labels)
 if label[0].isalpha():

@@ -144,7 +143,7 @@ def addfirstsets(self) -> None:
 self.calcfirst(name)
 # print name, self.first[name].keys()

-def calcfirst(self, name: Text) -> None:
+def calcfirst(self, name: str) -> None:
 dfa = self.dfas[name]
 self.first[name] = None # dummy to detect left recursion
 state = dfa[0]

@@ -176,7 +175,7 @@ def calcfirst(self, name: Text) -> None:
 inverse[symbol] = label
 self.first[name] = totalset

-def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]:
+def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
 dfas = {}
 startsymbol: Optional[str] = None
 # MSTART: (NEWLINE | RULE)* ENDMARKER

@@ -240,7 +239,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
 state.addarc(st, label)
 return states # List of DFAState instances; first one is start

-def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
+def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
 print("Dump of NFA for", name)
 todo = [start]
 for i, state in enumerate(todo):

@@ -256,7 +255,7 @@ def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
 else:
 print(" %s -> %d" % (label, j))

-def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None:
+def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
 print("Dump of DFA for", name)
 for i, state in enumerate(dfa):
 print(" State", i, state.isfinal and "(final)" or "")

@@ -349,7 +348,7 @@ def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
 )
 assert False

-def expect(self, type: int, value: Optional[Any] = None) -> Text:
+def expect(self, type: int, value: Optional[Any] = None) -> str:
 if self.type != type or (value is not None and self.value != value):
 self.raise_error(
 "expected %s/%s, got %s/%s", type, value, self.type, self.value

@@ -374,22 +373,22 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn:
 raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))


-class NFAState(object):
-arcs: List[Tuple[Optional[Text], "NFAState"]]
+class NFAState:
+arcs: List[Tuple[Optional[str], "NFAState"]]

 def __init__(self) -> None:
 self.arcs = [] # list of (label, NFAState) pairs

-def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None:
+def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
 assert label is None or isinstance(label, str)
 assert isinstance(next, NFAState)
 self.arcs.append((label, next))


-class DFAState(object):
+class DFAState:
 nfaset: Dict[NFAState, Any]
 isfinal: bool
-arcs: Dict[Text, "DFAState"]
+arcs: Dict[str, "DFAState"]

 def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
 assert isinstance(nfaset, dict)

@@ -399,7 +398,7 @@ def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
 self.isfinal = final in nfaset
 self.arcs = {} # map from label to DFAState

-def addarc(self, next: "DFAState", label: Text) -> None:
+def addarc(self, next: "DFAState", label: str) -> None:
 assert isinstance(label, str)
 assert label not in self.arcs
 assert isinstance(next, DFAState)
@@ -1,6 +1,5 @@
 """Token constants (from "token.h")."""

 import sys
 from typing import Dict

 from typing import Final
@@ -75,7 +74,7 @@

 tok_name: Final[Dict[int, str]] = {}
 for _name, _value in list(globals().items()):
-if type(_value) is type(0):
+if type(_value) is int:
 tok_name[_value] = _name

@@ -35,7 +35,6 @@
 List,
 Optional,
 Set,
-Text,
 Tuple,
 Pattern,
 Union,

@@ -77,7 +76,7 @@ def maybe(*choices: str) -> str:


 def _combinations(*l: str) -> Set[str]:
-return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}


 Whitespace = r"[ \f\t]*"

@@ -189,7 +188,7 @@ class StopTokenizing(Exception):


 def printtoken(
-type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
 ) -> None: # for testing
 (srow, scol) = srow_col
 (erow, ecol) = erow_col

@@ -198,10 +197,10 @@ def printtoken(
 )


-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]


-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
 """
 The tokenize() function accepts two parameters: one representing the
 input stream, and one providing an output mechanism for tokenize().

@@ -221,17 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)


 # backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
 for token_info in generate_tokens(readline):
 tokeneater(*token_info)


-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]


 class Untokenizer:
-tokens: List[Text]
+tokens: List[str]
 prev_row: int
 prev_col: int

@@ -247,13 +246,13 @@ def add_whitespace(self, start: Coord) -> None:
 if col_offset:
 self.tokens.append(" " * col_offset)

-def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
 for t in iterable:
 if len(t) == 2:
 self.compat(cast(Tuple[int, str], t), iterable)
 break
 tok_type, token, start, end, line = cast(
-Tuple[int, Text, Coord, Coord, Text], t
+Tuple[int, str, Coord, Coord, str], t
 )
 self.add_whitespace(start)
 self.tokens.append(token)

@@ -263,7 +262,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
 self.prev_col = 0
 return "".join(self.tokens)

-def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
 startline = False
 indents = []
 toks_append = self.tokens.append

@@ -335,7 +334,7 @@ def read_or_stop() -> bytes:
 try:
 return readline()
 except StopIteration:
-return bytes()
+return b''

 def find_cookie(line: bytes) -> Optional[str]:
 try:

@@ -384,7 +383,7 @@ def find_cookie(line: bytes) -> Optional[str]:
 return default, [first, second]


-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
 """Transform tokens back into Python source code.

 Each element returned by the iterable must be a token sequence

@@ -407,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:


 def generate_tokens(
-readline: Callable[[], Text], grammar: Optional[Grammar] = None
+readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
 """
 The generate_tokens() generator requires one argument, readline, which
@@ -9,7 +9,6 @@
 from typing import Union

 # Local imports
 from .pgen2 import token
 from .pgen2 import driver

 from .pgen2.grammar import Grammar
@@ -21,7 +20,7 @@
 # "PatternGrammar.txt")


-class Symbols(object):
+class Symbols:
 def __init__(self, grammar: Grammar) -> None:
 """Initializer.

@@ -18,7 +18,6 @@
 Iterator,
 List,
 Optional,
-Text,
 Tuple,
 TypeVar,
 Union,

@@ -34,10 +33,10 @@

 HUGE: int = 0x7FFFFFFF # maximum repeat count, default max

-_type_reprs: Dict[int, Union[Text, int]] = {}
+_type_reprs: Dict[int, Union[str, int]] = {}


-def type_repr(type_num: int) -> Union[Text, int]:
+def type_repr(type_num: int) -> Union[str, int]:
 global _type_reprs
 if not _type_reprs:
 from .pygram import python_symbols

@@ -54,11 +53,11 @@ def type_repr(type_num: int) -> Union[Text, int]:
 _P = TypeVar("_P", bound="Base")

 NL = Union["Node", "Leaf"]
-Context = Tuple[Text, Tuple[int, int]]
-RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]]
+Context = Tuple[str, Tuple[int, int]]
+RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]


-class Base(object):
+class Base:

 """
 Abstract base class for Node and Leaf.

@@ -92,7 +91,7 @@ def __eq__(self, other: Any) -> bool:
 return self._eq(other)

 @property
-def prefix(self) -> Text:
+def prefix(self) -> str:
 raise NotImplementedError

 def _eq(self: _P, other: _P) -> bool:

@@ -225,7 +224,7 @@ def depth(self) -> int:
 return 0
 return 1 + self.parent.depth()

-def get_suffix(self) -> Text:
+def get_suffix(self) -> str:
 """
 Return the string immediately following the invocant node. This is
 effectively equivalent to node.next_sibling.prefix
@@ -242,14 +241,14 @@ class Node(Base):
 """Concrete implementation for interior nodes."""

 fixers_applied: Optional[List[Any]]
-used_names: Optional[Set[Text]]
+used_names: Optional[Set[str]]

 def __init__(
 self,
 type: int,
 children: List[NL],
 context: Optional[Any] = None,
-prefix: Optional[Text] = None,
+prefix: Optional[str] = None,
 fixers_applied: Optional[List[Any]] = None,
 ) -> None:
 """

@@ -274,16 +273,16 @@ def __init__(
 else:
 self.fixers_applied = None

-def __repr__(self) -> Text:
+def __repr__(self) -> str:
 """Return a canonical string representation."""
 assert self.type is not None
-return "%s(%s, %r)" % (
+return "{}({}, {!r})".format(
 self.__class__.__name__,
 type_repr(self.type),
 self.children,
 )

-def __str__(self) -> Text:
+def __str__(self) -> str:
 """
 Return a pretty string representation.


@@ -317,7 +316,7 @@ def pre_order(self) -> Iterator[NL]:
 yield from child.pre_order()

 @property
-def prefix(self) -> Text:
+def prefix(self) -> str:
 """
 The whitespace and comments preceding this node in the input.
 """

@@ -326,7 +325,7 @@ def prefix(self) -> Text:
 return self.children[0].prefix

 @prefix.setter
-def prefix(self, prefix: Text) -> None:
+def prefix(self, prefix: str) -> None:
 if self.children:
 self.children[0].prefix = prefix


@@ -383,12 +382,12 @@ class Leaf(Base):
 """Concrete implementation for leaf nodes."""

 # Default values for instance variables
-value: Text
+value: str
 fixers_applied: List[Any]
 bracket_depth: int
 # Changed later in brackets.py
 opening_bracket: Optional["Leaf"] = None
-used_names: Optional[Set[Text]]
+used_names: Optional[Set[str]]
 _prefix = "" # Whitespace and comments preceding this token in the input
 lineno: int = 0 # Line where this token starts in the input
 column: int = 0 # Column where this token starts in the input

@@ -400,9 +399,9 @@ class Leaf(Base):
 def __init__(
 self,
 type: int,
-value: Text,
+value: str,
 context: Optional[Context] = None,
-prefix: Optional[Text] = None,
+prefix: Optional[str] = None,
 fixers_applied: List[Any] = [],
 opening_bracket: Optional["Leaf"] = None,
 fmt_pass_converted_first_leaf: Optional["Leaf"] = None,

@@ -431,13 +430,13 @@ def __repr__(self) -> str:
 from .pgen2.token import tok_name

 assert self.type is not None
-return "%s(%s, %r)" % (
+return "{}({}, {!r})".format(
 self.__class__.__name__,
 tok_name.get(self.type, self.type),
 self.value,
 )

-def __str__(self) -> Text:
+def __str__(self) -> str:
 """
 Return a pretty string representation.

@@ -471,14 +470,14 @@ def pre_order(self) -> Iterator["Leaf"]:
 yield self

 @property
-def prefix(self) -> Text:
+def prefix(self) -> str:
 """
 The whitespace and comments preceding this token in the input.
 """
 return self._prefix

 @prefix.setter
-def prefix(self, prefix: Text) -> None:
+def prefix(self, prefix: str) -> None:
 self.changed()
 self._prefix = prefix


@@ -503,10 +502,10 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
 return Leaf(type, value or "", context=context)


-_Results = Dict[Text, NL]
+_Results = Dict[str, NL]


-class BasePattern(object):
+class BasePattern:

 """
 A pattern is a tree matching pattern.

@@ -526,19 +525,19 @@ class BasePattern(object):
 type: Optional[int]
 type = None # Node type (token if < 256, symbol if >= 256)
 content: Any = None # Optional content matching pattern
-name: Optional[Text] = None # Optional name used to store match in results dict
+name: Optional[str] = None # Optional name used to store match in results dict

 def __new__(cls, *args, **kwds):
 """Constructor that prevents BasePattern from being instantiated."""
 assert cls is not BasePattern, "Cannot instantiate BasePattern"
 return object.__new__(cls)

-def __repr__(self) -> Text:
+def __repr__(self) -> str:
 assert self.type is not None
 args = [type_repr(self.type), self.content, self.name]
 while args and args[-1] is None:
 del args[-1]
-return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args)))

 def _submatch(self, node, results=None) -> bool:
 raise NotImplementedError

@@ -602,8 +601,8 @@ class LeafPattern(BasePattern):
 def __init__(
 self,
 type: Optional[int] = None,
-content: Optional[Text] = None,
-name: Optional[Text] = None,
+content: Optional[str] = None,
+name: Optional[str] = None,
 ) -> None:
 """
 Initializer. Takes optional type, content, and name.

@@ -653,8 +652,8 @@ class NodePattern(BasePattern):
 def __init__(
 self,
 type: Optional[int] = None,
-content: Optional[Iterable[Text]] = None,
-name: Optional[Text] = None,
+content: Optional[Iterable[str]] = None,
+name: Optional[str] = None,
 ) -> None:
 """
 Initializer. Takes optional type, content, and name.

@@ -734,10 +733,10 @@ class WildcardPattern(BasePattern):

 def __init__(
 self,
-content: Optional[Text] = None,
+content: Optional[str] = None,
 min: int = 0,
 max: int = HUGE,
-name: Optional[Text] = None,
+name: Optional[str] = None,
 ) -> None:
 """
 Initializer.
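Most of the hunks above only swap typing.Text for str. That rewrite cannot change behavior, because typing.Text has been a plain alias of str for the whole life of Python 3 (it existed only to help code that still straddled Python 2). A quick check, independent of this repository:

    import typing

    # typing.Text is the very same object as the built-in str, so annotations such as
    # Optional[Text] or Dict[Text, Text] keep their meaning when rewritten to use str.
    assert typing.Text is str
    print(typing.Optional[typing.Text] == typing.Optional[str])  # prints True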