Run pyupgrade on blib2to3 and src (#3771)

Author: Shantanu, 2023-07-09 15:05:01 -07:00 (committed by GitHub)
Parent: 114e8357e6
Commit: 0b4d7d55f7
12 changed files with 102 additions and 112 deletions
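pyupgrade mechanically rewrites legacy Python idioms into their modern spellings, and the hunks below are applications of a handful of such rewrites: bare @lru_cache, typing.Text → str, %-formatting → f-strings, dropping explicit object base classes and io.open, and set comprehensions. The snippet below is a representative before/after sketch of those rewrites, written for illustration (names such as Demo are hypothetical, not taken from the diff):

from functools import lru_cache


@lru_cache  # was: @lru_cache()
def cached_answer() -> int:
    return 42


class Demo:  # was: class Demo(object):
    def describe(self, name: str, count: int) -> str:  # was: name: Text
        return f"{name}: {count}"  # was: "%s: %d" % (name, count)

    def read(self, path: str) -> str:
        # was: with io.open(path, "r", encoding="utf-8") as stream:
        with open(path, encoding="utf-8") as stream:
            return stream.read()


def combinations(letters: str) -> set:
    # was: set(x + y for x in letters for y in letters)
    return {x + y for x in letters for y in letters}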

View File

@@ -42,7 +42,7 @@
 import colorama  # noqa: F401
-@lru_cache()
+@lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
 ) -> Tuple[Path, str]:
@@ -212,7 +212,7 @@ def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
     return SpecifierSet(",".join(str(s) for s in specifiers))
-@lru_cache()
+@lru_cache
 def find_user_pyproject_toml() -> Path:
     r"""Return the path to the top-level user configuration for black.
@@ -232,7 +232,7 @@ def find_user_pyproject_toml() -> Path:
     return user_config_path.resolve()
-@lru_cache()
+@lru_cache
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
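The only change in this file is dropping the parentheses from the lru_cache decorator. Since Python 3.8, functools.lru_cache accepts a function directly, so the bare decorator is equivalent to calling the factory with no arguments; a minimal illustration (not from the diff):

from functools import lru_cache


@lru_cache()  # pre-3.8 spelling: call the decorator factory with defaults
def old_style(n: int) -> int:
    return n * n


@lru_cache  # 3.8+ spelling: pass the function straight to lru_cache
def new_style(n: int) -> int:
    return n * n


assert old_style(3) == new_style(3) == 9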

View File

@@ -55,7 +55,7 @@ class Replacement:
     src: str
-@lru_cache()
+@lru_cache
 def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
     try:
         # isort: off

View File

@@ -63,7 +63,7 @@ def parse_graminit_h(self, filename):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         self.symbol2number = {}
         self.number2symbol = {}
@@ -72,7 +72,7 @@ def parse_graminit_h(self, filename):
             lineno += 1
             mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
             if not mo and line.strip():
-                print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
+                print(f"{filename}({lineno}): can't parse {line.strip()}")
             else:
                 symbol, number = mo.groups()
                 number = int(number)
@@ -113,7 +113,7 @@ def parse_graminit_c(self, filename):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         # The code below essentially uses f's iterator-ness!
         lineno = 0

View File

@@ -28,11 +28,8 @@
     Iterable,
     List,
     Optional,
-    Text,
     Iterator,
     Tuple,
-    TypeVar,
-    Generic,
     Union,
 )
 from contextlib import contextmanager
@@ -116,7 +113,7 @@ def can_advance(self, to: int) -> bool:
         return True
-class Driver(object):
+class Driver:
     def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
         self.grammar = grammar
         if logger is None:
@@ -189,30 +186,30 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->
         assert p.rootnode is not None
         return p.rootnode
-    def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
         return self.parse_tokens(tokens, debug)
-    def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         return self.parse_stream_raw(stream, debug)
     def parse_file(
-        self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+        self, filename: Path, encoding: Optional[str] = None, debug: bool = False
     ) -> NL:
         """Parse a file and return the syntax tree."""
-        with io.open(filename, "r", encoding=encoding) as stream:
+        with open(filename, encoding=encoding) as stream:
             return self.parse_stream(stream, debug)
-    def parse_string(self, text: Text, debug: bool = False) -> NL:
+    def parse_string(self, text: str, debug: bool = False) -> NL:
         """Parse a string and return the syntax tree."""
         tokens = tokenize.generate_tokens(
             io.StringIO(text).readline, grammar=self.grammar
         )
         return self.parse_tokens(tokens, debug)
-    def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
         lines: List[str] = []
         current_line = ""
         current_column = 0
@@ -240,7 +237,7 @@ def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Te
         return "".join(lines), current_line
-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
     head, tail = os.path.splitext(gt)
     if tail == ".txt":
         tail = ""
@@ -252,8 +249,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
 def load_grammar(
-    gt: Text = "Grammar.txt",
-    gp: Optional[Text] = None,
+    gt: str = "Grammar.txt",
+    gp: Optional[str] = None,
     save: bool = True,
     force: bool = False,
     logger: Optional[Logger] = None,
@@ -276,7 +273,7 @@ def load_grammar(
     return g
-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
     """Inquire whether file a was written since file b."""
     if not os.path.exists(a):
         return False
@@ -286,7 +283,7 @@ def _newer(a: Text, b: Text) -> bool:
 def load_packaged_grammar(
-    package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+    package: str, grammar_source: str, cache_dir: Optional[Path] = None
 ) -> grammar.Grammar:
     """Normally, loads a pickled grammar by doing
         pkgutil.get_data(package, pickled_grammar)
@@ -309,7 +306,7 @@ def load_packaged_grammar(
     return g
-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
     """Main program, when run as a script: produce grammar pickle files.
     Calls load_grammar for each argument, a path to a grammar text file.
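Most of the remaining churn in this and the following blib2to3 files is the replacement of typing.Text with str. Text is a Python 2/3 compatibility alias that is simply bound to str at runtime (and is deprecated in recent Python versions), so the annotations keep their meaning; a quick check, shown purely for illustration:

import typing

# On Python 3 the alias is literally the built-in str type.
assert typing.Text is str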

View File

@@ -16,19 +16,19 @@
 import os
 import pickle
 import tempfile
-from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
 # Local imports
 from . import token
 _P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[Text]]
+Label = Tuple[int, Optional[str]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
 Path = Union[str, "os.PathLike[str]"]
-class Grammar(object):
+class Grammar:
     """Pgen parsing tables conversion class.
     Once initialized, this class supplies the grammar tables for the

View File

@@ -5,10 +5,10 @@
 import re
-from typing import Dict, Match, Text
+from typing import Dict, Match
-simple_escapes: Dict[Text, Text] = {
+simple_escapes: Dict[str, str] = {
     "a": "\a",
     "b": "\b",
     "f": "\f",
@@ -22,7 +22,7 @@
 }
-def escape(m: Match[Text]) -> Text:
+def escape(m: Match[str]) -> str:
     all, tail = m.group(0, 1)
     assert all.startswith("\\")
     esc = simple_escapes.get(tail)
@@ -44,7 +44,7 @@ def escape(m: Match[Text]) -> Text:
     return chr(i)
-def evalString(s: Text) -> Text:
+def evalString(s: str) -> str:
     assert s.startswith("'") or s.startswith('"'), repr(s[:1])
     q = s[0]
     if s[:3] == q * 3:

View File

@@ -9,7 +9,6 @@
 how this parsing engine works.
 """
-import copy
 from contextlib import contextmanager
 # Local imports
@@ -18,7 +17,6 @@
     cast,
     Any,
     Optional,
-    Text,
     Union,
     Tuple,
     Dict,
@@ -35,7 +33,7 @@
 from blib2to3.pgen2.driver import TokenProxy
-Results = Dict[Text, NL]
+Results = Dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
@@ -100,7 +98,7 @@ def backtrack(self) -> Iterator[None]:
         finally:
             self.parser.is_backtracking = is_backtracking
-    def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+    def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
         func: Callable[..., Any]
         if raw:
             func = self.parser._addtoken
@@ -114,7 +112,7 @@ def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
                     args.insert(0, ilabel)
                 func(*args)
-    def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
+    def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
         alive_ilabels = self.ilabels
         if len(alive_ilabels) == 0:
             *_, most_successful_ilabel = self._dead_ilabels
@@ -131,10 +129,10 @@ class ParseError(Exception):
     """Exception to signal the parser is stuck."""
     def __init__(
-        self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+        self, msg: str, type: Optional[int], value: Optional[str], context: Context
     ) -> None:
         Exception.__init__(
-            self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+            self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
         )
         self.msg = msg
         self.type = type
@@ -142,7 +140,7 @@ def __init__(
         self.context = context
-class Parser(object):
+class Parser:
     """Parser engine.
     The proper usage sequence is:
@@ -236,7 +234,7 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         self.used_names: Set[str] = set()
         self.proxy = proxy
-    def addtoken(self, type: int, value: Text, context: Context) -> bool:
+    def addtoken(self, type: int, value: str, context: Context) -> bool:
         """Add a token; return True iff this is the end of the program."""
         # Map from token to label
         ilabels = self.classify(type, value, context)
@@ -284,7 +282,7 @@ def addtoken(self, type: int, value: Text, context: Context) -> bool:
         return self._addtoken(ilabel, type, value, context)
-    def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+    def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
         # Loop until the token is shifted; may raise exceptions
         while True:
             dfa, state, node = self.stack[-1]
@@ -329,7 +327,7 @@ def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bo
                     # No success finding a transition
                     raise ParseError("bad input", type, value, context)
-    def classify(self, type: int, value: Text, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> List[int]:
         """Turn a token into a label. (Internal)
         Depending on whether the value is a soft-keyword or not,
@@ -352,7 +350,7 @@ def classify(self, type: int, value: Text, context: Context) -> List[int]:
             raise ParseError("bad token", type, value, context)
         return [ilabel]
-    def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+    def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
         """Shift a token. (Internal)"""
         if self.is_backtracking:
             dfa, state, _ = self.stack[-1]

View File

@@ -11,7 +11,6 @@
     Iterator,
     List,
     Optional,
-    Text,
     Tuple,
     Union,
     Sequence,
@@ -29,13 +28,13 @@ class PgenGrammar(grammar.Grammar):
     pass
-class ParserGenerator(object):
+class ParserGenerator:
     filename: Path
-    stream: IO[Text]
+    stream: IO[str]
     generator: Iterator[GoodTokenInfo]
-    first: Dict[Text, Optional[Dict[Text, int]]]
+    first: Dict[str, Optional[Dict[str, int]]]
-    def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None:
+    def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
         close_stream = None
         if stream is None:
             stream = open(filename, encoding="utf-8")
@@ -75,7 +74,7 @@ def make_grammar(self) -> PgenGrammar:
         c.start = c.symbol2number[self.startsymbol]
         return c
-    def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
+    def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
         rawfirst = self.first[name]
         assert rawfirst is not None
         first = {}
@@ -85,7 +84,7 @@ def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
             first[ilabel] = 1
         return first
-    def make_label(self, c: PgenGrammar, label: Text) -> int:
+    def make_label(self, c: PgenGrammar, label: str) -> int:
         # XXX Maybe this should be a method on a subclass of converter?
         ilabel = len(c.labels)
         if label[0].isalpha():
@@ -144,7 +143,7 @@ def addfirstsets(self) -> None:
                 self.calcfirst(name)
             # print name, self.first[name].keys()
-    def calcfirst(self, name: Text) -> None:
+    def calcfirst(self, name: str) -> None:
         dfa = self.dfas[name]
         self.first[name] = None  # dummy to detect left recursion
         state = dfa[0]
@@ -176,7 +175,7 @@ def calcfirst(self, name: Text) -> None:
                 inverse[symbol] = label
         self.first[name] = totalset
-    def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]:
+    def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
         dfas = {}
         startsymbol: Optional[str] = None
         # MSTART: (NEWLINE | RULE)* ENDMARKER
@@ -240,7 +239,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
                 state.addarc(st, label)
         return states  # List of DFAState instances; first one is start
-    def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
+    def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
         print("Dump of NFA for", name)
         todo = [start]
         for i, state in enumerate(todo):
@@ -256,7 +255,7 @@ def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
                 else:
                     print(" %s -> %d" % (label, j))
-    def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None:
+    def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
         print("Dump of DFA for", name)
         for i, state in enumerate(dfa):
             print(" State", i, state.isfinal and "(final)" or "")
@@ -349,7 +348,7 @@ def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
            )
            assert False
-    def expect(self, type: int, value: Optional[Any] = None) -> Text:
+    def expect(self, type: int, value: Optional[Any] = None) -> str:
        if self.type != type or (value is not None and self.value != value):
            self.raise_error(
                "expected %s/%s, got %s/%s", type, value, self.type, self.value
@@ -374,22 +373,22 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn:
        raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
-class NFAState(object):
-    arcs: List[Tuple[Optional[Text], "NFAState"]]
+class NFAState:
+    arcs: List[Tuple[Optional[str], "NFAState"]]
     def __init__(self) -> None:
         self.arcs = []  # list of (label, NFAState) pairs
-    def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None:
+    def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
         assert label is None or isinstance(label, str)
         assert isinstance(next, NFAState)
         self.arcs.append((label, next))
-class DFAState(object):
+class DFAState:
     nfaset: Dict[NFAState, Any]
     isfinal: bool
-    arcs: Dict[Text, "DFAState"]
+    arcs: Dict[str, "DFAState"]
     def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
         assert isinstance(nfaset, dict)
@@ -399,7 +398,7 @@ def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
         self.isfinal = final in nfaset
         self.arcs = {}  # map from label to DFAState
-    def addarc(self, next: "DFAState", label: Text) -> None:
+    def addarc(self, next: "DFAState", label: str) -> None:
         assert isinstance(label, str)
         assert label not in self.arcs
         assert isinstance(next, DFAState)

View File

@@ -1,6 +1,5 @@
 """Token constants (from "token.h")."""
-import sys
 from typing import Dict
 from typing import Final
@@ -75,7 +74,7 @@
 tok_name: Final[Dict[int, str]] = {}
 for _name, _value in list(globals().items()):
-    if type(_value) is type(0):
+    if type(_value) is int:
         tok_name[_value] = _name

View File

@@ -35,7 +35,6 @@
     List,
     Optional,
     Set,
-    Text,
     Tuple,
     Pattern,
     Union,
@@ -77,7 +76,7 @@ def maybe(*choices: str) -> str:
 def _combinations(*l: str) -> Set[str]:
-    return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+    return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
 Whitespace = r"[ \f\t]*"
@@ -189,7 +188,7 @@ class StopTokenizing(Exception):
 def printtoken(
-    type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+    type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
 ) -> None:  # for testing
     (srow, scol) = srow_col
     (erow, ecol) = erow_col
@@ -198,10 +197,10 @@ def printtoken(
     )
-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]
-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
     """
     The tokenize() function accepts two parameters: one representing the
     input stream, and one providing an output mechanism for tokenize().
@@ -221,17 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)
 # backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
 class Untokenizer:
-    tokens: List[Text]
+    tokens: List[str]
     prev_row: int
     prev_col: int
@@ -247,13 +246,13 @@ def add_whitespace(self, start: Coord) -> None:
         if col_offset:
             self.tokens.append(" " * col_offset)
-    def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+    def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
         for t in iterable:
             if len(t) == 2:
                 self.compat(cast(Tuple[int, str], t), iterable)
                 break
             tok_type, token, start, end, line = cast(
-                Tuple[int, Text, Coord, Coord, Text], t
+                Tuple[int, str, Coord, Coord, str], t
             )
             self.add_whitespace(start)
             self.tokens.append(token)
@@ -263,7 +262,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
                 self.prev_col = 0
         return "".join(self.tokens)
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -335,7 +334,7 @@ def read_or_stop() -> bytes:
         try:
             return readline()
         except StopIteration:
-            return bytes()
+            return b''
     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -384,7 +383,7 @@ def find_cookie(line: bytes) -> Optional[str]:
     return default, [first, second]
-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
     """Transform tokens back into Python source code.
     Each element returned by the iterable must be a token sequence
@@ -407,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:
 def generate_tokens(
-    readline: Callable[[], Text], grammar: Optional[Grammar] = None
+    readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
     """
     The generate_tokens() generator requires one argument, readline, which

View File

@@ -9,7 +9,6 @@
 from typing import Union
 # Local imports
-from .pgen2 import token
 from .pgen2 import driver
 from .pgen2.grammar import Grammar
@@ -21,7 +20,7 @@
 # "PatternGrammar.txt")
-class Symbols(object):
+class Symbols:
     def __init__(self, grammar: Grammar) -> None:
         """Initializer.

View File

@@ -18,7 +18,6 @@
     Iterator,
     List,
     Optional,
-    Text,
     Tuple,
     TypeVar,
     Union,
@@ -34,10 +33,10 @@
 HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max
-_type_reprs: Dict[int, Union[Text, int]] = {}
+_type_reprs: Dict[int, Union[str, int]] = {}
-def type_repr(type_num: int) -> Union[Text, int]:
+def type_repr(type_num: int) -> Union[str, int]:
     global _type_reprs
     if not _type_reprs:
         from .pygram import python_symbols
@@ -54,11 +53,11 @@ def type_repr(type_num: int) -> Union[Text, int]:
 _P = TypeVar("_P", bound="Base")
 NL = Union["Node", "Leaf"]
-Context = Tuple[Text, Tuple[int, int]]
-RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]]
+Context = Tuple[str, Tuple[int, int]]
+RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
-class Base(object):
+class Base:
     """
     Abstract base class for Node and Leaf.
@@ -92,7 +91,7 @@ def __eq__(self, other: Any) -> bool:
         return self._eq(other)
     @property
-    def prefix(self) -> Text:
+    def prefix(self) -> str:
         raise NotImplementedError
     def _eq(self: _P, other: _P) -> bool:
@@ -225,7 +224,7 @@ def depth(self) -> int:
             return 0
         return 1 + self.parent.depth()
-    def get_suffix(self) -> Text:
+    def get_suffix(self) -> str:
         """
         Return the string immediately following the invocant node. This is
         effectively equivalent to node.next_sibling.prefix
@@ -242,14 +241,14 @@ class Node(Base):
     """Concrete implementation for interior nodes."""
     fixers_applied: Optional[List[Any]]
-    used_names: Optional[Set[Text]]
+    used_names: Optional[Set[str]]
     def __init__(
         self,
         type: int,
         children: List[NL],
         context: Optional[Any] = None,
-        prefix: Optional[Text] = None,
+        prefix: Optional[str] = None,
         fixers_applied: Optional[List[Any]] = None,
     ) -> None:
         """
@@ -274,16 +273,16 @@ def __init__(
         else:
             self.fixers_applied = None
-    def __repr__(self) -> Text:
+    def __repr__(self) -> str:
         """Return a canonical string representation."""
         assert self.type is not None
-        return "%s(%s, %r)" % (
+        return "{}({}, {!r})".format(
             self.__class__.__name__,
             type_repr(self.type),
             self.children,
         )
-    def __str__(self) -> Text:
+    def __str__(self) -> str:
         """
         Return a pretty string representation.
@@ -317,7 +316,7 @@ def pre_order(self) -> Iterator[NL]:
             yield from child.pre_order()
     @property
-    def prefix(self) -> Text:
+    def prefix(self) -> str:
         """
         The whitespace and comments preceding this node in the input.
         """
@@ -326,7 +325,7 @@ def prefix(self) -> Text:
         return self.children[0].prefix
     @prefix.setter
-    def prefix(self, prefix: Text) -> None:
+    def prefix(self, prefix: str) -> None:
         if self.children:
             self.children[0].prefix = prefix
@@ -383,12 +382,12 @@ class Leaf(Base):
     """Concrete implementation for leaf nodes."""
     # Default values for instance variables
-    value: Text
+    value: str
     fixers_applied: List[Any]
     bracket_depth: int
     # Changed later in brackets.py
     opening_bracket: Optional["Leaf"] = None
-    used_names: Optional[Set[Text]]
+    used_names: Optional[Set[str]]
     _prefix = ""  # Whitespace and comments preceding this token in the input
     lineno: int = 0  # Line where this token starts in the input
     column: int = 0  # Column where this token starts in the input
@@ -400,9 +399,9 @@ class Leaf(Base):
     def __init__(
         self,
         type: int,
-        value: Text,
+        value: str,
         context: Optional[Context] = None,
-        prefix: Optional[Text] = None,
+        prefix: Optional[str] = None,
         fixers_applied: List[Any] = [],
         opening_bracket: Optional["Leaf"] = None,
         fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
@@ -431,13 +430,13 @@ def __repr__(self) -> str:
         from .pgen2.token import tok_name
         assert self.type is not None
-        return "%s(%s, %r)" % (
+        return "{}({}, {!r})".format(
             self.__class__.__name__,
             tok_name.get(self.type, self.type),
             self.value,
         )
-    def __str__(self) -> Text:
+    def __str__(self) -> str:
         """
         Return a pretty string representation.
@@ -471,14 +470,14 @@ def pre_order(self) -> Iterator["Leaf"]:
         yield self
     @property
-    def prefix(self) -> Text:
+    def prefix(self) -> str:
         """
         The whitespace and comments preceding this token in the input.
         """
         return self._prefix
     @prefix.setter
-    def prefix(self, prefix: Text) -> None:
+    def prefix(self, prefix: str) -> None:
         self.changed()
         self._prefix = prefix
@@ -503,10 +502,10 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
         return Leaf(type, value or "", context=context)
-_Results = Dict[Text, NL]
+_Results = Dict[str, NL]
-class BasePattern(object):
+class BasePattern:
     """
     A pattern is a tree matching pattern.
@@ -526,19 +525,19 @@ class BasePattern(object):
     type: Optional[int]
     type = None  # Node type (token if < 256, symbol if >= 256)
     content: Any = None  # Optional content matching pattern
-    name: Optional[Text] = None  # Optional name used to store match in results dict
+    name: Optional[str] = None  # Optional name used to store match in results dict
     def __new__(cls, *args, **kwds):
         """Constructor that prevents BasePattern from being instantiated."""
         assert cls is not BasePattern, "Cannot instantiate BasePattern"
         return object.__new__(cls)
-    def __repr__(self) -> Text:
+    def __repr__(self) -> str:
         assert self.type is not None
         args = [type_repr(self.type), self.content, self.name]
         while args and args[-1] is None:
             del args[-1]
-        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+        return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args)))
     def _submatch(self, node, results=None) -> bool:
         raise NotImplementedError
@@ -602,8 +601,8 @@ class LeafPattern(BasePattern):
     def __init__(
         self,
         type: Optional[int] = None,
-        content: Optional[Text] = None,
-        name: Optional[Text] = None,
+        content: Optional[str] = None,
+        name: Optional[str] = None,
     ) -> None:
         """
         Initializer. Takes optional type, content, and name.
@@ -653,8 +652,8 @@ class NodePattern(BasePattern):
     def __init__(
         self,
         type: Optional[int] = None,
-        content: Optional[Iterable[Text]] = None,
-        name: Optional[Text] = None,
+        content: Optional[Iterable[str]] = None,
+        name: Optional[str] = None,
     ) -> None:
         """
         Initializer. Takes optional type, content, and name.
@@ -734,10 +733,10 @@ class WildcardPattern(BasePattern):
     def __init__(
         self,
-        content: Optional[Text] = None,
+        content: Optional[str] = None,
         min: int = 0,
         max: int = HUGE,
-        name: Optional[Text] = None,
+        name: Optional[str] = None,
     ) -> None:
         """
         Initializer.