Compare commits

...

10 Commits

Author SHA1 Message Date
GiGaGon
7987951e24
Convert legacy string formatting to f-strings (#4685)
* the changes

* Update driver.py
2025-06-05 18:51:26 -07:00
GiGaGon
e5e5dad792
Fix await ellipses and remove async/await soft keyword/identifier support (#4676)
* Update tokenize.py

* Update driver.py

* Update test_black.py

* Update test_black.py

* Update python37.py

* Update tokenize.py

* Update CHANGES.md

* Update CHANGES.md

* Update faq.md

* Update driver.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
2025-06-05 18:50:42 -07:00
GiGaGon
24e4cb20ab
Fix backslash cr nl bug (#4673)
* Update tokenize.py

* Update CHANGES.md

* Update test_black.py

* Update test_black.py

* Update test_black.py
2025-06-05 18:49:15 -07:00
GiGaGon
e7bf7b4619
Fix CI mypyc 1.16 failure (#4671) 2025-05-29 14:10:29 -07:00
cobalt
71e380aedf
CI: Remove now-unneeded workarounds (#4665) 2025-05-25 18:23:42 -05:00
dependabot[bot]
2630801f95
Bump pypa/cibuildwheel from 2.22.0 to 2.23.3 (#4660)
Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.22.0 to 2.23.3.
- [Release notes](https://github.com/pypa/cibuildwheel/releases)
- [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md)
- [Commits](https://github.com/pypa/cibuildwheel/compare/v2.22.0...v2.23.3)

---
updated-dependencies:
- dependency-name: pypa/cibuildwheel
  dependency-version: 2.23.3
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-15 07:22:11 -05:00
danigm
b0f36f5b42
Update test_code_option_safe to work with click 8.2.0 (#4666) 2025-05-15 07:04:00 -05:00
cobalt
314f8cf92b
Update Prettier pre-commit configuration (#4662)
* Update Prettier configuration

Signed-off-by: cobalt <61329810+cobaltt7@users.noreply.github.com>

* Update .github/workflows/diff_shades.yml

Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com>

---------

Signed-off-by: cobalt <61329810+cobaltt7@users.noreply.github.com>
Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com>
2025-05-11 19:21:50 -05:00
Pedro Mezacasa Muller
d0ff3bd6cb
Fix crash when a tuple is used as a ContextManager (#4646) 2025-04-08 21:42:17 -07:00
pre-commit-ci[bot]
a41dc89f1f
[pre-commit.ci] pre-commit autoupdate (#4644)
updates:
- [github.com/pycqa/isort: 5.13.2 → 6.0.1](https://github.com/pycqa/isort/compare/5.13.2...6.0.1)
- [github.com/pycqa/flake8: 7.1.1 → 7.2.0](https://github.com/pycqa/flake8/compare/7.1.1...7.2.0)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
2025-04-07 14:45:01 -07:00
21 changed files with 188 additions and 198 deletions

View File

@ -34,7 +34,8 @@ jobs:
env: env:
GITHUB_TOKEN: ${{ github.token }} GITHUB_TOKEN: ${{ github.token }}
run: > run: >
python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }} python scripts/diff_shades_gha_helper.py config ${{ github.event_name }}
${{ matrix.mode }}
analysis: analysis:
name: analysis / ${{ matrix.mode }} name: analysis / ${{ matrix.mode }}
@ -48,7 +49,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
include: ${{ fromJson(needs.configure.outputs.matrix )}} include: ${{ fromJson(needs.configure.outputs.matrix) }}
steps: steps:
- name: Checkout this repository (full clone) - name: Checkout this repository (full clone)
@ -130,10 +131,9 @@ jobs:
- name: Generate summary file (PR only) - name: Generate summary file (PR only)
if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
run: > run: >
python helper.py comment-body python helper.py comment-body ${{ matrix.baseline-analysis }}
${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} ${{ matrix.target-analysis }} ${{ matrix.baseline-sha }}
${{ matrix.baseline-sha }} ${{ matrix.target-sha }} ${{ matrix.target-sha }} ${{ github.event.pull_request.number }}
${{ github.event.pull_request.number }}
- name: Upload summary file (PR only) - name: Upload summary file (PR only)
if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'

View File

@ -92,7 +92,7 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
# Keep cibuildwheel version in sync with above # Keep cibuildwheel version in sync with above
- uses: pypa/cibuildwheel@v2.22.0 - uses: pypa/cibuildwheel@v2.23.3
with: with:
only: ${{ matrix.only }} only: ${{ matrix.only }}

View File

@ -24,12 +24,12 @@ repos:
additional_dependencies: *version_check_dependencies additional_dependencies: *version_check_dependencies
- repo: https://github.com/pycqa/isort - repo: https://github.com/pycqa/isort
rev: 5.13.2 rev: 6.0.1
hooks: hooks:
- id: isort - id: isort
- repo: https://github.com/pycqa/flake8 - repo: https://github.com/pycqa/flake8
rev: 7.1.1 rev: 7.2.0
hooks: hooks:
- id: flake8 - id: flake8
additional_dependencies: additional_dependencies:
@ -48,7 +48,9 @@ repos:
- types-PyYAML - types-PyYAML
- types-atheris - types-atheris
- tomli >= 0.2.6, < 2.0.0 - tomli >= 0.2.6, < 2.0.0
- click >= 8.1.0, != 8.1.4, != 8.1.5 - click >= 8.2.0
# Click is intentionally out-of-sync with pyproject.toml
# v8.2 has breaking changes. We work around them at runtime, but we need the newer stubs.
- packaging >= 22.0 - packaging >= 22.0
- platformdirs >= 2.1.0 - platformdirs >= 2.1.0
- pytokens >= 0.1.10 - pytokens >= 0.1.10
@ -64,11 +66,11 @@ repos:
args: ["--python-version=3.10"] args: ["--python-version=3.10"]
additional_dependencies: *mypy_deps additional_dependencies: *mypy_deps
- repo: https://github.com/pre-commit/mirrors-prettier - repo: https://github.com/rbubley/mirrors-prettier
rev: v4.0.0-alpha.8 rev: v3.5.3
hooks: hooks:
- id: prettier - id: prettier
types_or: [css, javascript, html, json, yaml] types_or: [markdown, yaml, json]
exclude: \.github/workflows/diff_shades\.yml exclude: \.github/workflows/diff_shades\.yml
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks

View File

@ -9,19 +9,24 @@
### Stable style ### Stable style
<!-- Changes that affect Black's stable style --> <!-- Changes that affect Black's stable style -->
- Fix crash while formatting a long `del` statement containing tuples (#4628) - Fix crash while formatting a long `del` statement containing tuples (#4628)
- Fix crash while formatting expressions using the walrus operator in complex - Fix crash while formatting expressions using the walrus operator in complex `with`
`with` statements (#4630) statements (#4630)
- Handle `# fmt: skip` followed by a comment at the end of file (#4635) - Handle `# fmt: skip` followed by a comment at the end of file (#4635)
- Fix crash when a tuple appears in the `as` clause of a `with` statement - Fix crash when a tuple appears in the `as` clause of a `with` statement (#4634)
(#4634) - Fix crash when tuple is used as a context manager inside a `with` statement (#4646)
- Fix crash on a `\\r\n` (#4673)
- Fix crash on `await ...` (where `...` is a literal `Ellipsis`) (#4676)
- Remove support for pre-python 3.7 `await/async` as soft keywords/variable names
(#4676)
### Preview style ### Preview style
<!-- Changes that affect Black's preview style --> <!-- Changes that affect Black's preview style -->
- Fix a bug where one-liner functions/conditionals marked with `# fmt: skip` - Fix a bug where one-liner functions/conditionals marked with `# fmt: skip` would still
would still be formatted (#4552) be formatted (#4552)
### Configuration ### Configuration
@ -36,8 +41,8 @@
<!-- Changes to the parser or to version autodetection --> <!-- Changes to the parser or to version autodetection -->
- Rewrite tokenizer to improve performance and compliance (#4536) - Rewrite tokenizer to improve performance and compliance (#4536)
- Fix bug where certain unusual expressions (e.g., lambdas) were not accepted - Fix bug where certain unusual expressions (e.g., lambdas) were not accepted in type
in type parameter bounds and defaults. (#4602) parameter bounds and defaults. (#4602)
### Performance ### Performance
@ -56,8 +61,8 @@
<!-- For example, Docker, GitHub Actions, pre-commit, editors --> <!-- For example, Docker, GitHub Actions, pre-commit, editors -->
- Fix the version check in the vim file to reject Python 3.8 (#4567) - Fix the version check in the vim file to reject Python 3.8 (#4567)
- Enhance GitHub Action `psf/black` to read Black version from an additional - Enhance GitHub Action `psf/black` to read Black version from an additional section in
section in pyproject.toml: `[project.dependency-groups]` (#4606) pyproject.toml: `[project.dependency-groups]` (#4606)
### Documentation ### Documentation
@ -68,8 +73,8 @@
### Highlights ### Highlights
This release introduces the new 2025 stable style (#4558), stabilizing This release introduces the new 2025 stable style (#4558), stabilizing the following
the following changes: changes:
- Normalize casing of Unicode escape characters in strings to lowercase (#2916) - Normalize casing of Unicode escape characters in strings to lowercase (#2916)
- Fix inconsistencies in whether certain strings are detected as docstrings (#4095) - Fix inconsistencies in whether certain strings are detected as docstrings (#4095)
@ -77,15 +82,16 @@ the following changes:
- Remove redundant parentheses in if guards for case blocks (#4214) - Remove redundant parentheses in if guards for case blocks (#4214)
- Add parentheses to if clauses in case blocks when the line is too long (#4269) - Add parentheses to if clauses in case blocks when the line is too long (#4269)
- Whitespace before `# fmt: skip` comments is no longer normalized (#4146) - Whitespace before `# fmt: skip` comments is no longer normalized (#4146)
- Fix line length computation for certain expressions that involve the power operator (#4154) - Fix line length computation for certain expressions that involve the power operator
(#4154)
- Check if there is a newline before the terminating quotes of a docstring (#4185) - Check if there is a newline before the terminating quotes of a docstring (#4185)
- Fix type annotation spacing between `*` and more complex type variable tuple (#4440) - Fix type annotation spacing between `*` and more complex type variable tuple (#4440)
The following changes were not in any previous release: The following changes were not in any previous release:
- Remove parentheses around sole list items (#4312) - Remove parentheses around sole list items (#4312)
- Generic function definitions are now formatted more elegantly: parameters are - Generic function definitions are now formatted more elegantly: parameters are split
split over multiple lines first instead of type parameter definitions (#4553) over multiple lines first instead of type parameter definitions (#4553)
### Stable style ### Stable style

View File

@ -137,8 +137,8 @@ SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtuale
pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant, pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant,
Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more. Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more.
The following organizations use _Black_: Dropbox, KeepTruckin, Lyft, Mozilla, The following organizations use _Black_: Dropbox, KeepTruckin, Lyft, Mozilla, Quora,
Quora, Duolingo, QuantumBlack, Tesla, Archer Aviation. Duolingo, QuantumBlack, Tesla, Archer Aviation.
Are we missing anyone? Let us know. Are we missing anyone? Let us know.

View File

@ -29,8 +29,8 @@ frequently than monthly nets rapidly diminishing returns.
**You must have `write` permissions for the _Black_ repository to cut a release.** **You must have `write` permissions for the _Black_ repository to cut a release.**
The 10,000 foot view of the release process is that you prepare a release PR and then The 10,000 foot view of the release process is that you prepare a release PR and then
publish a [GitHub Release]. This triggers [release automation](#release-workflows) that builds publish a [GitHub Release]. This triggers [release automation](#release-workflows) that
all release artifacts and publishes them to the various platforms we publish to. builds all release artifacts and publishes them to the various platforms we publish to.
We now have a `scripts/release.py` script to help with cutting the release PRs. We now have a `scripts/release.py` script to help with cutting the release PRs.
@ -96,8 +96,9 @@ In the end, use your best judgement and ask other maintainers for their thoughts
## Release workflows ## Release workflows
All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore configured All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore
using YAML files in the `.github/workflows` directory of the _Black_ repository. configured using YAML files in the `.github/workflows` directory of the _Black_
repository.
They are triggered by the publication of a [GitHub Release]. They are triggered by the publication of a [GitHub Release].

View File

@ -93,6 +93,8 @@ Support for formatting Python 2 code was removed in version 22.0. While we've ma
plans to stop supporting older Python 3 minor versions immediately, their support might plans to stop supporting older Python 3 minor versions immediately, their support might
also be removed some time in the future without a deprecation period. also be removed some time in the future without a deprecation period.
`await`/`async` as soft keywords/identifiers are no longer supported as of 25.2.0.
Runtime support for 3.6 was removed in version 22.10.0, for 3.7 in version 23.7.0, and Runtime support for 3.6 was removed in version 22.10.0, for 3.7 in version 23.7.0, and
for 3.8 in version 24.10.0. for 3.8 in version 24.10.0.

View File

@ -38,9 +38,9 @@ the `pyproject.toml` file. `version` can be any
or just the version number if you want an exact version. To read the version from the or just the version number if you want an exact version. To read the version from the
`pyproject.toml` file instead, set `use_pyproject` to `true`. This will first look into `pyproject.toml` file instead, set `use_pyproject` to `true`. This will first look into
the `tool.black.required-version` field, then the `dependency-groups` table, then the the `tool.black.required-version` field, then the `dependency-groups` table, then the
`project.dependencies` array and finally the `project.optional-dependencies` table. `project.dependencies` array and finally the `project.optional-dependencies` table. The
The action defaults to the latest release available on PyPI. Only versions available action defaults to the latest release available on PyPI. Only versions available from
from PyPI are supported, so no commit SHAs or branch names. PyPI are supported, so no commit SHAs or branch names.
If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter` If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter`
extra. Installing the extra and including Jupyter Notebook files can be configured via extra. Installing the extra and including Jupyter Notebook files can be configured via

View File

@ -5,14 +5,11 @@
a coverage-guided fuzzer I'm working on. a coverage-guided fuzzer I'm working on.
""" """
import re
import hypothesmith import hypothesmith
from hypothesis import HealthCheck, given, settings from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st from hypothesis import strategies as st
import black import black
from blib2to3.pgen2.tokenize import TokenError
# This test uses the Hypothesis and Hypothesmith libraries to generate random # This test uses the Hypothesis and Hypothesmith libraries to generate random
@ -45,23 +42,7 @@ def test_idempotent_any_syntatically_valid_python(
compile(src_contents, "<string>", "exec") # else the bug is in hypothesmith compile(src_contents, "<string>", "exec") # else the bug is in hypothesmith
# Then format the code... # Then format the code...
try:
dst_contents = black.format_str(src_contents, mode=mode) dst_contents = black.format_str(src_contents, mode=mode)
except black.InvalidInput:
# This is a bug - if it's valid Python code, as above, Black should be
# able to cope with it. See issues #970, #1012
# TODO: remove this try-except block when issues are resolved.
return
except TokenError as e:
if ( # Special-case logic for backslashes followed by newlines or end-of-input
e.args[0] == "EOF in multi-line statement"
and re.search(r"\\($|\r?\n)", src_contents) is not None
):
# This is a bug - if it's valid Python code, as above, Black should be
# able to cope with it. See issue #1012.
# TODO: remove this block when the issue is resolved.
return
raise
# And check that we got equivalent and stable output. # And check that we got equivalent and stable output.
black.assert_equivalent(src_contents, dst_contents) black.assert_equivalent(src_contents, dst_contents)

View File

@ -77,7 +77,7 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> in
git("commit", "--allow-empty", "-aqC", commit) git("commit", "--allow-empty", "-aqC", commit)
for commit in commits: for commit in commits:
git("branch", "-qD", "%s-black" % commit) git("branch", "-qD", f"{commit}-black")
return 0 return 0

View File

@ -40,6 +40,7 @@
ensure_visible, ensure_visible,
fstring_to_string, fstring_to_string,
get_annotation_type, get_annotation_type,
has_sibling_with_type,
is_arith_like, is_arith_like,
is_async_stmt_or_funcdef, is_async_stmt_or_funcdef,
is_atom_with_invisible_parens, is_atom_with_invisible_parens,
@ -1628,6 +1629,11 @@ def maybe_make_parens_invisible_in_atom(
or is_empty_tuple(node) or is_empty_tuple(node)
or is_one_tuple(node) or is_one_tuple(node)
or (is_tuple(node) and parent.type == syms.asexpr_test) or (is_tuple(node) and parent.type == syms.asexpr_test)
or (
is_tuple(node)
and parent.type == syms.with_stmt
and has_sibling_with_type(node, token.COMMA)
)
or (is_yield(node) and parent.type != syms.expr_stmt) or (is_yield(node) and parent.type != syms.expr_stmt)
or ( or (
# This condition tries to prevent removing non-optional brackets # This condition tries to prevent removing non-optional brackets

View File

@ -1058,3 +1058,21 @@ def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN:
while node.parent and node.parent.children and node is node.parent.children[-1]: while node.parent and node.parent.children and node is node.parent.children[-1]:
node = node.parent node = node.parent
return node return node
def has_sibling_with_type(node: LN, type: int) -> bool:
# Check previous siblings
sibling = node.prev_sibling
while sibling is not None:
if sibling.type == type:
return True
sibling = sibling.prev_sibling
# Check next siblings
sibling = node.next_sibling
while sibling is not None:
if sibling.type == type:
return True
sibling = sibling.next_sibling
return False

View File

@ -28,16 +28,16 @@ def escape(m: re.Match[str]) -> str:
if tail.startswith("x"): if tail.startswith("x"):
hexes = tail[1:] hexes = tail[1:]
if len(hexes) < 2: if len(hexes) < 2:
raise ValueError("invalid hex string escape ('\\%s')" % tail) raise ValueError(f"invalid hex string escape ('\\{tail}')")
try: try:
i = int(hexes, 16) i = int(hexes, 16)
except ValueError: except ValueError:
raise ValueError("invalid hex string escape ('\\%s')" % tail) from None raise ValueError(f"invalid hex string escape ('\\{tail}')") from None
else: else:
try: try:
i = int(tail, 8) i = int(tail, 8)
except ValueError: except ValueError:
raise ValueError("invalid octal string escape ('\\%s')" % tail) from None raise ValueError(f"invalid octal string escape ('\\{tail}')") from None
return chr(i) return chr(i)

View File

@ -89,18 +89,12 @@ def backtrack(self) -> Iterator[None]:
self.parser.is_backtracking = is_backtracking self.parser.is_backtracking = is_backtracking
def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None: def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
else:
func = self.parser.addtoken
for ilabel in self.ilabels: for ilabel in self.ilabels:
with self.switch_to(ilabel): with self.switch_to(ilabel):
args = [tok_type, tok_val, self.context]
if raw: if raw:
args.insert(0, ilabel) self.parser._addtoken(ilabel, tok_type, tok_val, self.context)
func(*args) else:
self.parser.addtoken(tok_type, tok_val, self.context)
def determine_route( def determine_route(
self, value: Optional[str] = None, force: bool = False self, value: Optional[str] = None, force: bool = False

View File

@ -140,7 +140,7 @@ def calcfirst(self, name: str) -> None:
if label in self.first: if label in self.first:
fset = self.first[label] fset = self.first[label]
if fset is None: if fset is None:
raise ValueError("recursion for rule %r" % name) raise ValueError(f"recursion for rule {name!r}")
else: else:
self.calcfirst(label) self.calcfirst(label)
fset = self.first[label] fset = self.first[label]
@ -155,8 +155,8 @@ def calcfirst(self, name: str) -> None:
for symbol in itsfirst: for symbol in itsfirst:
if symbol in inverse: if symbol in inverse:
raise ValueError( raise ValueError(
"rule %s is ambiguous; %s is in the first sets of %s as well" f"rule {name} is ambiguous; {symbol} is in the first sets of"
" as %s" % (name, symbol, label, inverse[symbol]) f" {label} as well as {inverse[symbol]}"
) )
inverse[symbol] = label inverse[symbol] = label
self.first[name] = totalset self.first[name] = totalset
@ -237,16 +237,16 @@ def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
j = len(todo) j = len(todo)
todo.append(next) todo.append(next)
if label is None: if label is None:
print(" -> %d" % j) print(f" -> {j}")
else: else:
print(" %s -> %d" % (label, j)) print(f" {label} -> {j}")
def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None: def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
print("Dump of DFA for", name) print("Dump of DFA for", name)
for i, state in enumerate(dfa): for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "") print(" State", i, state.isfinal and "(final)" or "")
for label, next in sorted(state.arcs.items()): for label, next in sorted(state.arcs.items()):
print(" %s -> %d" % (label, dfa.index(next))) print(f" {label} -> {dfa.index(next)}")
def simplify_dfa(self, dfa: list["DFAState"]) -> None: def simplify_dfa(self, dfa: list["DFAState"]) -> None:
# This is not theoretically optimal, but works well enough. # This is not theoretically optimal, but works well enough.
@ -330,15 +330,12 @@ def parse_atom(self) -> tuple["NFAState", "NFAState"]:
return a, z return a, z
else: else:
self.raise_error( self.raise_error(
"expected (...) or NAME or STRING, got %s/%s", self.type, self.value f"expected (...) or NAME or STRING, got {self.type}/{self.value}"
) )
raise AssertionError
def expect(self, type: int, value: Optional[Any] = None) -> str: def expect(self, type: int, value: Optional[Any] = None) -> str:
if self.type != type or (value is not None and self.value != value): if self.type != type or (value is not None and self.value != value):
self.raise_error( self.raise_error(f"expected {type}/{value}, got {self.type}/{self.value}")
"expected %s/%s, got %s/%s", type, value, self.type, self.value
)
value = self.value value = self.value
self.gettoken() self.gettoken()
return value return value
@ -350,12 +347,7 @@ def gettoken(self) -> None:
self.type, self.value, self.begin, self.end, self.line = tup self.type, self.value, self.begin, self.end, self.line = tup
# print token.tok_name[self.type], repr(self.value) # print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg: str, *args: Any) -> NoReturn: def raise_error(self, msg: str) -> NoReturn:
if args:
try:
msg = msg % args
except Exception:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError( raise SyntaxError(
msg, (str(self.filename), self.end[0], self.end[1], self.line) msg, (str(self.filename), self.end[0], self.end[1], self.line)
) )

View File

@ -113,7 +113,17 @@ def transform_whitespace(
and prev_token.type not in (TokenType.nl, TokenType.newline) and prev_token.type not in (TokenType.nl, TokenType.newline)
): ):
token_str = source[token.start_index : token.end_index] token_str = source[token.start_index : token.end_index]
if token_str.startswith("\\\n"): if token_str.startswith("\\\r\n"):
return pytokens.Token(
TokenType.nl,
token.start_index,
token.start_index + 3,
token.start_line,
token.start_col,
token.start_line,
token.start_col + 3,
)
elif token_str.startswith("\\\n") or token_str.startswith("\\\r"):
return pytokens.Token( return pytokens.Token(
TokenType.nl, TokenType.nl,
token.start_index, token.start_index,
@ -128,20 +138,13 @@ def transform_whitespace(
def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenInfo]: def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenInfo]:
async_keywords = False if grammar is None else grammar.async_keywords
lines = source.split("\n") lines = source.split("\n")
lines += [""] # For newline tokens in files that don't end in a newline lines += [""] # For newline tokens in files that don't end in a newline
line, column = 1, 0 line, column = 1, 0
token_iterator = pytokens.tokenize(source)
is_async = False
current_indent = 0
async_indent = 0
prev_token: Optional[pytokens.Token] = None prev_token: Optional[pytokens.Token] = None
try: try:
for token in token_iterator: for token in pytokens.tokenize(source):
token = transform_whitespace(token, source, prev_token) token = transform_whitespace(token, source, prev_token)
line, column = token.start_line, token.start_col line, column = token.start_line, token.start_col
@ -156,58 +159,18 @@ def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenIn
prev_token = token prev_token = token
continue continue
if token.type == TokenType.indent:
current_indent += 1
if token.type == TokenType.dedent:
current_indent -= 1
if is_async and current_indent < async_indent:
is_async = False
source_line = lines[token.start_line - 1] source_line = lines[token.start_line - 1]
if token.type == TokenType.identifier and token_str in ("async", "await"): if token.type == TokenType.identifier and token_str in ("async", "await"):
# Black uses `async` and `await` token types just for those two keywords # Black uses `async` and `await` token types just for those two keywords
while True:
next_token = next(token_iterator)
next_str = source[next_token.start_index : next_token.end_index]
next_token = transform_whitespace(next_token, next_str, token)
if next_token.type == TokenType.whitespace:
continue
break
next_token_type = TOKEN_TYPE_MAP[next_token.type]
next_line = lines[next_token.start_line - 1]
if token_str == "async" and (
async_keywords
or (next_token_type == NAME and next_str in ("def", "for"))
):
is_async = True
async_indent = current_indent + 1
current_token_type = ASYNC
elif token_str == "await" and (async_keywords or is_async):
current_token_type = AWAIT
else:
current_token_type = TOKEN_TYPE_MAP[token.type]
yield ( yield (
current_token_type, ASYNC if token_str == "async" else AWAIT,
token_str, token_str,
(token.start_line, token.start_col), (token.start_line, token.start_col),
(token.end_line, token.end_col), (token.end_line, token.end_col),
source_line, source_line,
) )
yield ( elif token.type == TokenType.op and token_str == "...":
next_token_type,
next_str,
(next_token.start_line, next_token.start_col),
(next_token.end_line, next_token.end_col),
next_line,
)
prev_token = token
continue
if token.type == TokenType.op and token_str == "...":
# Black doesn't have an ellipsis token yet, yield 3 DOTs instead # Black doesn't have an ellipsis token yet, yield 3 DOTs instead
assert token.start_line == token.end_line assert token.start_line == token.end_line
assert token.end_col == token.start_col + 3 assert token.end_col == token.start_col + 3
@ -222,9 +185,7 @@ def tokenize(source: str, grammar: Optional[Grammar] = None) -> Iterator[TokenIn
(token.end_line, end_col), (token.end_line, end_col),
source_line, source_line,
) )
prev_token = token else:
continue
yield ( yield (
TOKEN_TYPE_MAP[token.type], TOKEN_TYPE_MAP[token.type],
token_str, token_str,
@ -245,9 +206,7 @@ def printtoken(
) -> None: # for testing ) -> None: # for testing
(srow, scol) = srow_col (srow, scol) = srow_col
(erow, ecol) = erow_col (erow, ecol) = erow_col
print( print(f"{srow},{scol}-{erow},{ecol}:\t{tok_name[type]}\t{token!r}")
"%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token))
)
if __name__ == "__main__": # testing if __name__ == "__main__": # testing

View File

@ -268,11 +268,7 @@ def __init__(
def __repr__(self) -> str: def __repr__(self) -> str:
"""Return a canonical string representation.""" """Return a canonical string representation."""
assert self.type is not None assert self.type is not None
return "{}({}, {!r})".format( return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})"
self.__class__.__name__,
type_repr(self.type),
self.children,
)
def __str__(self) -> str: def __str__(self) -> str:
""" """
@ -421,10 +417,9 @@ def __repr__(self) -> str:
from .pgen2.token import tok_name from .pgen2.token import tok_name
assert self.type is not None assert self.type is not None
return "{}({}, {!r})".format( return (
self.__class__.__name__, f"{self.__class__.__name__}({tok_name.get(self.type, self.type)},"
tok_name.get(self.type, self.type), f" {self.value!r})"
self.value,
) )
def __str__(self) -> str: def __str__(self) -> str:
@ -527,7 +522,7 @@ def __repr__(self) -> str:
args = [type_repr(self.type), self.content, self.name] args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None: while args and args[-1] is None:
del args[-1] del args[-1]
return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args))) return f"{self.__class__.__name__}({', '.join(map(repr, args))})"
def _submatch(self, node, results=None) -> bool: def _submatch(self, node, results=None) -> bool:
raise NotImplementedError raise NotImplementedError

View File

@ -89,6 +89,26 @@ async def func():
with (x, y) as z: with (x, y) as z:
pass pass
# don't remove the brackets here, it changes the meaning of the code.
# even though the code will always trigger a runtime error
with (name_5, name_4), name_5:
pass
def test_tuple_as_contextmanager():
from contextlib import nullcontext
try:
with (nullcontext(),nullcontext()),nullcontext():
pass
except TypeError:
# test passed
pass
else:
# this should be a type error
assert False
# output # output
@ -182,3 +202,23 @@ async def func():
# don't remove the brackets here, it changes the meaning of the code. # don't remove the brackets here, it changes the meaning of the code.
with (x, y) as z: with (x, y) as z:
pass pass
# don't remove the brackets here, it changes the meaning of the code.
# even though the code will always trigger a runtime error
with (name_5, name_4), name_5:
pass
def test_tuple_as_contextmanager():
from contextlib import nullcontext
try:
with (nullcontext(), nullcontext()), nullcontext():
pass
except TypeError:
# test passed
pass
else:
# this should be a type error
assert False

View File

@ -10,6 +10,7 @@ def g():
async def func(): async def func():
await ...
if test: if test:
out_batched = [ out_batched = [
i i
@ -42,6 +43,7 @@ def g():
async def func(): async def func():
await ...
if test: if test:
out_batched = [ out_batched = [
i i

View File

@ -119,7 +119,7 @@ def __init__(self) -> None:
if Version(imp_version("click")) >= Version("8.2.0"): if Version(imp_version("click")) >= Version("8.2.0"):
super().__init__() super().__init__()
else: else:
super().__init__(mix_stderr=False) super().__init__(mix_stderr=False) # type: ignore
def invokeBlack( def invokeBlack(
@ -422,21 +422,6 @@ def test_skip_magic_trailing_comma(self) -> None:
) )
self.assertEqual(expected, actual, msg) self.assertEqual(expected, actual, msg)
@patch("black.dump_to_file", dump_to_stderr)
def test_async_as_identifier(self) -> None:
source_path = get_case_path("miscellaneous", "async_as_identifier")
_, source, expected = read_data_from_file(source_path)
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major < 3 or (major <= 3 and minor < 7):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.6
self.invokeBlack([str(source_path), "--target-version", "py36"])
# but not on 3.7, because async/await is no longer an identifier
self.invokeBlack([str(source_path), "--target-version", "py37"], exit_code=123)
@patch("black.dump_to_file", dump_to_stderr) @patch("black.dump_to_file", dump_to_stderr)
def test_python37(self) -> None: def test_python37(self) -> None:
source_path = get_case_path("cases", "python37") source_path = get_case_path("cases", "python37")
@ -449,8 +434,6 @@ def test_python37(self) -> None:
black.assert_stable(source, actual, DEFAULT_MODE) black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.7 # ensure black can parse this when the target is 3.7
self.invokeBlack([str(source_path), "--target-version", "py37"]) self.invokeBlack([str(source_path), "--target-version", "py37"])
# but not on 3.6, because we use async as a reserved keyword
self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123)
def test_tab_comment_indentation(self) -> None: def test_tab_comment_indentation(self) -> None:
contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n" contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n"
@ -1907,7 +1890,8 @@ def test_code_option_safe(self) -> None:
args = ["--safe", "--code", code] args = ["--safe", "--code", code]
result = CliRunner().invoke(black.main, args) result = CliRunner().invoke(black.main, args)
self.compare_results(result, error_msg, 123) assert error_msg == result.output
assert result.exit_code == 123
def test_code_option_fast(self) -> None: def test_code_option_fast(self) -> None:
"""Test that the code option ignores errors when the sanity checks fail.""" """Test that the code option ignores errors when the sanity checks fail."""
@ -2064,6 +2048,26 @@ def test_lines_with_leading_tabs_expanded(self) -> None:
assert lines_with_leading_tabs_expanded("\t\tx") == [f"{tab}{tab}x"] assert lines_with_leading_tabs_expanded("\t\tx") == [f"{tab}{tab}x"]
assert lines_with_leading_tabs_expanded("\tx\n y") == [f"{tab}x", " y"] assert lines_with_leading_tabs_expanded("\tx\n y") == [f"{tab}x", " y"]
def test_backslash_carriage_return(self) -> None:
# These tests are here instead of in the normal cases because
# of git's newline normalization and because it's hard to
# get `\r` vs `\r\n` vs `\n` to display properly in editors
assert black.format_str("x=\\\r\n1", mode=black.FileMode()) == "x = 1\n"
assert black.format_str("x=\\\n1", mode=black.FileMode()) == "x = 1\n"
assert black.format_str("x=\\\r1", mode=black.FileMode()) == "x = 1\n"
assert (
black.format_str("class A\\\r\n:...", mode=black.FileMode())
== "class A: ...\n"
)
assert (
black.format_str("class A\\\n:...", mode=black.FileMode())
== "class A: ...\n"
)
assert (
black.format_str("class A\\\r:...", mode=black.FileMode())
== "class A: ...\n"
)
class TestCaching: class TestCaching:
def test_get_cache_dir( def test_get_cache_dir(

24
tox.ini
View File

@ -13,18 +13,16 @@ skip_install = True
recreate = True recreate = True
deps = deps =
-r{toxinidir}/test_requirements.txt -r{toxinidir}/test_requirements.txt
; parallelization is disabled on CI because pytest-dev/pytest-xdist#620 occurs too frequently
; local runs can stay parallelized since they aren't rolling the dice so many times as like on CI
commands = commands =
pip install -e .[d] pip install -e .[d]
coverage erase coverage erase
pytest tests --run-optional no_jupyter \ pytest tests --run-optional no_jupyter \
!ci: --numprocesses auto \ --numprocesses auto \
--cov {posargs} --cov {posargs}
pip install -e .[jupyter] pip install -e .[jupyter]
pytest tests --run-optional jupyter \ pytest tests --run-optional jupyter \
-m jupyter \ -m jupyter \
!ci: --numprocesses auto \ --numprocesses auto \
--cov --cov-append {posargs} --cov --cov-append {posargs}
coverage report coverage report
@ -34,20 +32,15 @@ skip_install = True
recreate = True recreate = True
deps = deps =
-r{toxinidir}/test_requirements.txt -r{toxinidir}/test_requirements.txt
; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317
; this seems to cause tox to wait forever
; remove this when pypy releases the bugfix
commands = commands =
pip install -e .[d] pip install -e .[d]
pytest tests \ pytest tests \
--run-optional no_jupyter \ --run-optional no_jupyter \
!ci: --numprocesses auto \ --numprocesses auto
ci: --numprocesses 1
pip install -e .[jupyter] pip install -e .[jupyter]
pytest tests --run-optional jupyter \ pytest tests --run-optional jupyter \
-m jupyter \ -m jupyter \
!ci: --numprocesses auto \ --numprocesses auto
ci: --numprocesses 1
[testenv:{,ci-}311] [testenv:{,ci-}311]
setenv = setenv =
@ -59,22 +52,17 @@ deps =
; We currently need > aiohttp 3.8.1 that is on PyPI for 3.11 ; We currently need > aiohttp 3.8.1 that is on PyPI for 3.11
git+https://github.com/aio-libs/aiohttp git+https://github.com/aio-libs/aiohttp
-r{toxinidir}/test_requirements.txt -r{toxinidir}/test_requirements.txt
; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317
; this seems to cause tox to wait forever
; remove this when pypy releases the bugfix
commands = commands =
pip install -e .[d] pip install -e .[d]
coverage erase coverage erase
pytest tests \ pytest tests \
--run-optional no_jupyter \ --run-optional no_jupyter \
!ci: --numprocesses auto \ --numprocesses auto \
ci: --numprocesses 1 \
--cov {posargs} --cov {posargs}
pip install -e .[jupyter] pip install -e .[jupyter]
pytest tests --run-optional jupyter \ pytest tests --run-optional jupyter \
-m jupyter \ -m jupyter \
!ci: --numprocesses auto \ --numprocesses auto \
ci: --numprocesses 1 \
--cov --cov-append {posargs} --cov --cov-append {posargs}
coverage report coverage report