Compare commits

133 commits (the exported author and date columns were empty and are omitted):

7987951e24  e5e5dad792  24e4cb20ab  e7bf7b4619  71e380aedf  2630801f95
b0f36f5b42  314f8cf92b  d0ff3bd6cb  a41dc89f1f  950ec38c11  2c135edf37
6144c46c6a  dd278cb316  dbb14eac93  5342d2eeda  9f38928414  3e9dd25dad
bb802cf19a  5ae38dd370  45cbe572ee  fccd70cff1  00c0d6d91a  0580ecbef3
ed64d89faa  452d3b68f4  256f3420b1  00cb6d15c5  14e1de805a  5f23701708
9c129567e7  c02ca47daa  edaf085a18  b844c8a136  d82da0f0e9  8a737e727a
d330deea00  3d8129001f  459562c71a  99dbf3006b  c0b92f3888  e58baf15b9
1455ae4731  584d0331c8  6e9654065c  8dc912774e  40b73f2fb5  e157ba4de5
fdabd424e2  9431e98522  3b00112ac5  0aabac4fe0  ed33205579  6000d37f09
30759ca782  84ac1a947d  0db1173bbc  3fab5ade71  e54f86bae4  96ca1b6be3
17efac45f9  73f651f02f  f6c7c98f34  d670b0439c  56896264e4  efd9778873
c472557ba8  53a219056d  c98fc0c128  f54f34799b  484a669699  fff747d61b
9995bffbe4  7452902c77  32ebb93003  1b2427a2b7  a22b1ebbfd  b7d0e7212b
f1a2f92bba  8d9d18c033  bbfdba3a5e  8fb2add1f7  2a45cecf29  b4d6d8632d
ac018c16ca  058da5f81a  98a580bbdc  f50aba4984  6b27ef53e2  26aeebe9fb
9e13708be8  ac28187bf4  823a7b0ff0  699b45aef7  c20423249e  5ec91686ff
7e3e8f5bd9  b965c2a502  9ccf279a17  14b6e61970  b1c4dd96d7  4b4ae43e8b
7fa1faf83a  8827accf56  b0da11d370  721dff5493  7e2afc9bfd  1ad5263f2f
9ff047a957  5e571ccbbe  978bc505ac  b1f7b9f87d  b677a643c5  8447af4d8d
9c1fd463e1  b9c63230b4  4af12c499e  c827551b23  3be19b306f  f71925885c
ccfb0db4d5  c801cd60b1  f22b2437d5  75eb55764e  f2da85fe7f  0ab0b75717
0c033f3eb7  455de7703e  dbb956b0d3  3702ba224e  e4aaa8a994  ba88fc372e
5683242fd4

.git_archival.txt

@@ -1,4 +1,3 @@
 node: $Format:%H$
 node-date: $Format:%cI$
-describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
-ref-names: $Format:%D$
+describe-name: $Format:%(describe:tags=true,match=[0-9]*)$

.gitattributes (vendored, 1 changed line)

@@ -1 +1,2 @@
 .git_archival.txt  export-subst
+*.py diff=python

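For context, `export-subst` is what makes the `$Format:...$` placeholders in the preceding file work: when the repository is exported with `git archive`, git expands each placeholder using the corresponding log format specifier (`%H`, `%cI`, `%(describe)`), which is how version tooling recovers release metadata from source tarballs. A minimal local check, assuming `git` and a tar that accepts `-O` (extract to stdout); the output values below are placeholders, not actual data:

```console
$ git archive HEAD .git_archival.txt | tar -xOf - .git_archival.txt
node: <full commit hash of HEAD>
node-date: <committer date of HEAD>
describe-name: <git describe output using the match pattern above>
```
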
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 4 changed lines)

@@ -12,7 +12,9 @@ current development version. To confirm this, you have three options:

 1. Update Black's version if a newer release exists: `pip install -U black`
 2. Use the online formatter at <https://black.vercel.app/?version=main>, which will use
-   the latest main branch.
+   the latest main branch. Note that the online formatter currently runs on
+   an older version of Python and may not support newer syntax, such as the
+   extended f-string syntax added in Python 3.12.
 3. Or run _Black_ on your machine:
    - create a new virtualenv (make sure it's the same Python version);
    - clone this repository;

.github/dependabot.yml (vendored, 1 changed line)

@@ -14,4 +14,3 @@ updates:
     schedule:
       interval: "weekly"
     labels: ["skip news", "C: dependencies", "T: documentation"]
-    reviewers: ["ichard26"]

.github/workflows/diff_shades.yml (vendored, 26 changed lines)

@@ -26,7 +26,7 @@ jobs:

       - name: Install diff-shades and support dependencies
         run: |
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip

       - name: Calculate run configuration & metadata
@@ -34,7 +34,8 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ github.token }}
         run: >
-          python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }}
+          python scripts/diff_shades_gha_helper.py config ${{ github.event_name }}
+          ${{ matrix.mode }}

   analysis:
     name: analysis / ${{ matrix.mode }}
@@ -44,11 +45,11 @@ jobs:
       HATCH_BUILD_HOOKS_ENABLE: "1"
       # Clang is less picky with the C code it's given than gcc (and may
       # generate faster binaries too).
-      CC: clang-14
+      CC: clang-18
     strategy:
       fail-fast: false
       matrix:
-        include: ${{ fromJson(needs.configure.outputs.matrix )}}
+        include: ${{ fromJson(needs.configure.outputs.matrix) }}

     steps:
       - name: Checkout this repository (full clone)
@@ -64,7 +65,7 @@ jobs:
       - name: Install diff-shades and support dependencies
         run: |
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
           # After checking out old revisions, this might not exist so we'll use a copy.
           cat scripts/diff_shades_gha_helper.py > helper.py
           git config user.name "diff-shades-gha"
@@ -110,19 +111,19 @@ jobs:
           ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}

       - name: Upload diff report
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.mode }}-diff.html
           path: diff.html

       - name: Upload baseline analysis
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.baseline-analysis }}
           path: ${{ matrix.baseline-analysis }}

       - name: Upload target analysis
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.target-analysis }}
           path: ${{ matrix.target-analysis }}
@@ -130,14 +131,13 @@ jobs:
       - name: Generate summary file (PR only)
         if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
         run: >
-          python helper.py comment-body
-          ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}
-          ${{ matrix.baseline-sha }} ${{ matrix.target-sha }}
-          ${{ github.event.pull_request.number }}
+          python helper.py comment-body ${{ matrix.baseline-analysis }}
+          ${{ matrix.target-analysis }} ${{ matrix.baseline-sha }}
+          ${{ matrix.target-sha }} ${{ github.event.pull_request.number }}

       - name: Upload summary file (PR only)
         if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: .pr-comment.json
           path: .pr-comment.json

.github/workflows/doc.yml (vendored, 8 changed lines)

@@ -26,13 +26,15 @@ jobs:
      - name: Set up latest Python
        uses: actions/setup-python@v5
        with:
-         python-version: "*"
+         python-version: "3.13"
+         allow-prereleases: true

      - name: Install dependencies
        run: |
          python -m pip install uv
-         python -m uv pip install --system -e ".[d]"
-         python -m uv pip install --system -r "docs/requirements.txt"
+         python -m uv venv
+         python -m uv pip install -e ".[d]"
+         python -m uv pip install -r "docs/requirements.txt"

      - name: Build documentation
        run: sphinx-build -a -b html -W --keep-going docs/ docs/_build

.github/workflows/docker.yml (vendored, 6 changed lines)

@@ -36,7 +36,7 @@ jobs:
           latest_non_release)" >> $GITHUB_ENV

       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -47,7 +47,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           !github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -58,7 +58,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64

.github/workflows/fuzz.yml (vendored, 3 changed lines)

@@ -22,7 +22,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13"]

     steps:
       - uses: actions/checkout@v4
@@ -31,6 +31,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true

       - name: Install dependencies
         run: |

.github/workflows/lint.yml (vendored, 3 changed lines)

@@ -26,7 +26,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true

       - name: Install dependencies
         run: |

.github/workflows/pypi_upload.yml (vendored, 17 changed lines)

@@ -23,7 +23,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true

       - name: Install latest pip, build, twine
         run: |
@@ -46,10 +47,11 @@ jobs:
       include: ${{ steps.set-matrix.outputs.include }}
     steps:
       - uses: actions/checkout@v4
+      # Keep cibuildwheel version in sync with below
       - name: Install cibuildwheel and pypyp
         run: |
-          pipx install cibuildwheel==2.15.0
-          pipx install pypyp==1
+          pipx install cibuildwheel==2.22.0
+          pipx install pypyp==1.3.0
       - name: generate matrix
         if: github.event_name != 'pull_request'
         run: |
@@ -73,7 +75,7 @@ jobs:
           | pyp 'json.dumps({"only": x, "os": "ubuntu-latest"})'
         } | pyp 'json.dumps(list(map(json.loads, lines)))' > /tmp/matrix
         env:
-          CIBW_BUILD: "cp38-* cp312-*"
+          CIBW_BUILD: "cp39-* cp313-*"
           CIBW_ARCHS_LINUX: x86_64
       - id: set-matrix
         run: echo "include=$(cat /tmp/matrix)" | tee -a $GITHUB_OUTPUT
@@ -89,14 +91,15 @@ jobs:

     steps:
       - uses: actions/checkout@v4
-      - uses: pypa/cibuildwheel@v2.17.0
+      # Keep cibuildwheel version in sync with above
+      - uses: pypa/cibuildwheel@v2.23.3
         with:
           only: ${{ matrix.only }}

       - name: Upload wheels as workflow artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ matrix.name }}-mypyc-wheels
+          name: ${{ matrix.only }}-mypyc-wheels
           path: ./wheelhouse/*.whl

       - if: github.event_name == 'release'

.github/workflows/release_tests.yml (vendored, 2 changed lines)

@@ -25,7 +25,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: ["3.12"]
+        python-version: ["3.13"]
        os: [macOS-latest, ubuntu-latest, windows-latest]

    steps:

.github/workflows/test.yml (vendored, 9 changed lines)

@@ -31,7 +31,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "pypy-3.9"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13", "pypy-3.9"]
         os: [ubuntu-latest, macOS-latest, windows-latest]

     steps:
@@ -41,6 +41,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true

       - name: Install tox
         run: |
@@ -62,7 +63,7 @@ jobs:
         if:
           github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' &&
           !startsWith(matrix.python-version, 'pypy')
-        uses: AndreMiras/coveralls-python-action@8799c9f4443ac4201d2e2f2c725d577174683b99
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           parallel: true
@@ -77,7 +78,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Send finished signal to Coveralls
-        uses: AndreMiras/coveralls-python-action@8799c9f4443ac4201d2e2f2c725d577174683b99
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
         with:
           parallel-finished: true
           debug: true
@@ -98,7 +99,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"

       - name: Install black with uvloop
         run: |

.github/workflows/upload_binary.yml (vendored, 6 changed lines)

@@ -13,13 +13,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [windows-2019, ubuntu-20.04, macos-latest]
+        os: [windows-2019, ubuntu-22.04, macos-latest]
         include:
           - os: windows-2019
             pathsep: ";"
             asset_name: black_windows.exe
             executable_mime: "application/vnd.microsoft.portable-executable"
-          - os: ubuntu-20.04
+          - os: ubuntu-22.04
             pathsep: ":"
             asset_name: black_linux
             executable_mime: "application/x-executable"
@@ -34,7 +34,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"

       - name: Install Black and PyInstaller
         run: |

|
@ -24,12 +24,12 @@ repos:
|
||||
additional_dependencies: *version_check_dependencies
|
||||
|
||||
- repo: https://github.com/pycqa/isort
|
||||
rev: 5.13.2
|
||||
rev: 6.0.1
|
||||
hooks:
|
||||
- id: isort
|
||||
|
||||
- repo: https://github.com/pycqa/flake8
|
||||
rev: 7.0.0
|
||||
rev: 7.2.0
|
||||
hooks:
|
||||
- id: flake8
|
||||
additional_dependencies:
|
||||
@ -39,17 +39,21 @@ repos:
|
||||
exclude: ^src/blib2to3/
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: v1.9.0
|
||||
rev: v1.15.0
|
||||
hooks:
|
||||
- id: mypy
|
||||
exclude: ^(docs/conf.py|scripts/generate_schema.py)$
|
||||
args: []
|
||||
additional_dependencies: &mypy_deps
|
||||
- types-PyYAML
|
||||
- types-atheris
|
||||
- tomli >= 0.2.6, < 2.0.0
|
||||
- click >= 8.1.0, != 8.1.4, != 8.1.5
|
||||
- click >= 8.2.0
|
||||
# Click is intentionally out-of-sync with pyproject.toml
|
||||
# v8.2 has breaking changes. We work around them at runtime, but we need the newer stubs.
|
||||
- packaging >= 22.0
|
||||
- platformdirs >= 2.1.0
|
||||
- pytokens >= 0.1.10
|
||||
- pytest
|
||||
- hypothesis
|
||||
- aiohttp >= 3.7.4
|
||||
@ -62,14 +66,15 @@ repos:
|
||||
args: ["--python-version=3.10"]
|
||||
additional_dependencies: *mypy_deps
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-prettier
|
||||
rev: v4.0.0-alpha.8
|
||||
- repo: https://github.com/rbubley/mirrors-prettier
|
||||
rev: v3.5.3
|
||||
hooks:
|
||||
- id: prettier
|
||||
types_or: [markdown, yaml, json]
|
||||
exclude: \.github/workflows/diff_shades\.yml
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.5.0
|
||||
rev: v5.0.0
|
||||
hooks:
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
|
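The `rev:` bumps above are the kind of change `pre-commit autoupdate` produces. A quick way to exercise the updated hooks locally, using the standard pre-commit CLI (assuming it is installed):

```console
$ pre-commit autoupdate       # refreshes rev: pins like those changed above
$ pre-commit run --all-files  # runs isort, flake8, mypy, prettier, etc. on the repo
```
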
.readthedocs.yaml

@@ -16,3 +16,6 @@ python:
       path: .
       extra_requirements:
         - d
+
+sphinx:
+  configuration: docs/conf.py

AUTHORS.md

@@ -181,6 +181,7 @@ Multiple contributions by:
 - [Tony Narlock](mailto:tony@git-pull.com)
 - [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com)
 - [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu)
+- [Tushar Sadhwani](mailto:tushar.sadhwani000@gmail.com)
 - [Tzu-ping Chung](mailto:uranusjr@gmail.com)
 - [Utsav Shah](mailto:ukshah2@illinois.edu)
 - utsav-dbx

CHANGES.md (202 changed lines)

@@ -1,5 +1,207 @@
 # Change Log

+## Unreleased
+
+### Highlights
+
+<!-- Include any especially major or disruptive changes here -->
+
+### Stable style
+
+<!-- Changes that affect Black's stable style -->
+
+- Fix crash while formatting a long `del` statement containing tuples (#4628)
+- Fix crash while formatting expressions using the walrus operator in complex `with`
+  statements (#4630)
+- Handle `# fmt: skip` followed by a comment at the end of file (#4635)
+- Fix crash when a tuple appears in the `as` clause of a `with` statement (#4634)
+- Fix crash when tuple is used as a context manager inside a `with` statement (#4646)
+- Fix crash on a `\\r\n` (#4673)
+- Fix crash on `await ...` (where `...` is a literal `Ellipsis`) (#4676)
+- Remove support for pre-python 3.7 `await/async` as soft keywords/variable names
+  (#4676)
+
+### Preview style
+
+<!-- Changes that affect Black's preview style -->
+
+- Fix a bug where one-liner functions/conditionals marked with `# fmt: skip` would still
+  be formatted (#4552)
+
+### Configuration
+
+<!-- Changes to how Black can be configured -->
+
+### Packaging
+
+<!-- Changes to how Black is packaged, such as dependency requirements -->
+
+### Parser
+
+<!-- Changes to the parser or to version autodetection -->
+
+- Rewrite tokenizer to improve performance and compliance (#4536)
+- Fix bug where certain unusual expressions (e.g., lambdas) were not accepted in type
+  parameter bounds and defaults. (#4602)
+
+### Performance
+
+<!-- Changes that improve Black's performance. -->
+
+### Output
+
+<!-- Changes to Black's terminal output and error messages -->
+
+### _Blackd_
+
+<!-- Changes to blackd -->
+
+### Integrations
+
+<!-- For example, Docker, GitHub Actions, pre-commit, editors -->
+
+- Fix the version check in the vim file to reject Python 3.8 (#4567)
+- Enhance GitHub Action `psf/black` to read Black version from an additional section in
+  pyproject.toml: `[project.dependency-groups]` (#4606)
+
+### Documentation
+
+<!-- Major changes to documentation and policies. Small docs changes
+don't need a changelog entry. -->
+
+## 25.1.0
+
+### Highlights
+
+This release introduces the new 2025 stable style (#4558), stabilizing the following
+changes:
+
+- Normalize casing of Unicode escape characters in strings to lowercase (#2916)
+- Fix inconsistencies in whether certain strings are detected as docstrings (#4095)
+- Consistently add trailing commas to typed function parameters (#4164)
+- Remove redundant parentheses in if guards for case blocks (#4214)
+- Add parentheses to if clauses in case blocks when the line is too long (#4269)
+- Whitespace before `# fmt: skip` comments is no longer normalized (#4146)
+- Fix line length computation for certain expressions that involve the power operator
+  (#4154)
+- Check if there is a newline before the terminating quotes of a docstring (#4185)
+- Fix type annotation spacing between `*` and more complex type variable tuple (#4440)
+
+The following changes were not in any previous release:
+
+- Remove parentheses around sole list items (#4312)
+- Generic function definitions are now formatted more elegantly: parameters are split
+  over multiple lines first instead of type parameter definitions (#4553)
+
+### Stable style
+
+- Fix formatting cells in IPython notebooks with magic methods and starting or trailing
+  empty lines (#4484)
+- Fix crash when formatting `with` statements containing tuple generators/unpacking
+  (#4538)
+
+### Preview style
+
+- Fix/remove string merging changing f-string quotes on f-strings with internal quotes
+  (#4498)
+- Collapse multiple empty lines after an import into one (#4489)
+- Prevent `string_processing` and `wrap_long_dict_values_in_parens` from removing
+  parentheses around long dictionary values (#4377)
+- Move `wrap_long_dict_values_in_parens` from the unstable to preview style (#4561)
+
+### Packaging
+
+- Store license identifier inside the `License-Expression` metadata field, see
+  [PEP 639](https://peps.python.org/pep-0639/). (#4479)
+
+### Performance
+
+- Speed up the `is_fstring_start` function in Black's tokenizer (#4541)
+
+### Integrations
+
+- If using stdin with `--stdin-filename` set to a force excluded path, stdin won't be
+  formatted. (#4539)
+
+## 24.10.0
+
+### Highlights
+
+- Black is now officially tested with Python 3.13 and provides Python 3.13
+  mypyc-compiled wheels. (#4436) (#4449)
+- Black will issue an error when used with Python 3.12.5, due to an upstream memory
+  safety issue in Python 3.12.5 that can cause Black's AST safety checks to fail. Please
+  use Python 3.12.6 or Python 3.12.4 instead. (#4447)
+- Black no longer supports running with Python 3.8 (#4452)
+
+### Stable style
+
+- Fix crashes involving comments in parenthesised return types or `X | Y` style unions.
+  (#4453)
+- Fix skipping Jupyter cells with unknown `%%` magic (#4462)
+
+### Preview style
+
+- Fix type annotation spacing between * and more complex type variable tuple (i.e. `def
+  fn(*args: *tuple[*Ts, T]) -> None: pass`) (#4440)
+
+### Caching
+
+- Fix bug where the cache was shared between runs with and without `--unstable` (#4466)
+
+### Packaging
+
+- Upgrade version of mypyc used to 1.12 beta (#4450) (#4449)
+- `blackd` now requires a newer version of aiohttp. (#4451)
+
+### Output
+
+- Added Python target version information on parse error (#4378)
+- Add information about Black version to internal error messages (#4457)
+
+## 24.8.0
+
+### Stable style
+
+- Fix crash when `# fmt: off` is used before a closing parenthesis or bracket. (#4363)
+
+### Packaging
+
+- Packaging metadata updated: docs are explicitly linked, the issue tracker is now also
+  linked. This improves the PyPI listing for Black. (#4345)
+
+### Parser
+
+- Fix regression where Black failed to parse a multiline f-string containing another
+  multiline string (#4339)
+- Fix regression where Black failed to parse an escaped single quote inside an f-string
+  (#4401)
+- Fix bug with Black incorrectly parsing empty lines with a backslash (#4343)
+- Fix bugs with Black's tokenizer not handling `\{` inside f-strings very well (#4422)
+- Fix incorrect line numbers in the tokenizer for certain tokens within f-strings
+  (#4423)
+
+### Performance
+
+- Improve performance when a large directory is listed in `.gitignore` (#4415)
+
+### _Blackd_
+
+- Fix blackd (and all extras installs) for docker container (#4357)
+
+## 24.4.2
+
+This is a bugfix release to fix two regressions in the new f-string parser introduced in
+24.4.1.
+
+### Parser
+
+- Fix regression where certain complex f-strings failed to parse (#4332)
+
+### Performance
+
+- Fix bad performance on certain complex string literals (#4331)
+
 ## 24.4.1

 ### Highlights

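To make the `# fmt: skip` preview entry above concrete, this is the shape of code the fix protects (a minimal hand-written sketch based on the `fix_fmt_skip_in_one_liners` description quoted later in this diff, not taken from the repository's test suite):

```python
# With the fix, Black leaves this one-liner exactly as written instead of
# reformatting the declaration despite the trailing pragma.
def foo(): return "mock"  # fmt: skip
```
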
CONTRIBUTING.md

@@ -1,10 +1,13 @@
 # Contributing to _Black_

-Welcome! Happy to see you willing to make the project better. Have you read the entire
-[user documentation](https://black.readthedocs.io/en/latest/) yet?
+Welcome future contributor! We're happy to see you willing to make the project better.

-Our [contributing documentation](https://black.readthedocs.org/en/latest/contributing/)
-contains details on all you need to know about contributing to _Black_, the basics to
-the internals of _Black_.
+If you aren't familiar with _Black_, or are looking for documentation on something
+specific, the [user documentation](https://black.readthedocs.io/en/latest/) is the best
+place to look.

-We look forward to your contributions!
+For getting started on contributing, please read the
+[contributing documentation](https://black.readthedocs.org/en/latest/contributing/) for
+all you need to know.
+
+Thank you, and we look forward to your contributions!

Dockerfile

@@ -10,7 +10,8 @@ RUN python -m venv $VIRTUAL_ENV
 RUN python -m pip install --no-cache-dir hatch hatch-fancy-pypi-readme hatch-vcs
 RUN . /opt/venv/bin/activate && pip install --no-cache-dir --upgrade pip setuptools \
     && cd /src && hatch build -t wheel \
-    && pip install --no-cache-dir dist/*-cp*[colorama,d,uvloop]
+    && pip install --no-cache-dir dist/*-cp* \
+    && pip install black[colorama,d,uvloop]

 FROM python:3.12-slim

README.md

@@ -38,7 +38,7 @@ Try it out now using the [Black Playground](https://black.vercel.app). Watch the

 ### Installation

-_Black_ can be installed by running `pip install black`. It requires Python 3.8+ to run.
+_Black_ can be installed by running `pip install black`. It requires Python 3.9+ to run.
 If you want to format Jupyter Notebooks, install with `pip install "black[jupyter]"`.

 If you can't wait for the latest _hotness_ and want to install from GitHub, use:
@@ -137,8 +137,8 @@ SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv),
 pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant,
 Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more.

-The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Lyft, Mozilla,
-Quora, Duolingo, QuantumBlack, Tesla, Archer Aviation.
+The following organizations use _Black_: Dropbox, KeepTruckin, Lyft, Mozilla, Quora,
+Duolingo, QuantumBlack, Tesla, Archer Aviation.

 Are we missing anyone? Let us know.

action/main.py

@@ -71,6 +71,7 @@ def read_version_specifier_from_pyproject() -> str:
         return f"=={version}"

     arrays = [
+        *pyproject.get("dependency-groups", {}).values(),
         pyproject.get("project", {}).get("dependencies"),
         *pyproject.get("project", {}).get("optional-dependencies", {}).values(),
     ]

autoload/black.vim

@@ -75,8 +75,8 @@ def _initialize_black_env(upgrade=False):
         return True

     pyver = sys.version_info[:3]
-    if pyver < (3, 8):
-        print("Sorry, Black requires Python 3.8+ to run.")
+    if pyver < (3, 9):
+        print("Sorry, Black requires Python 3.9+ to run.")
         return False

     from pathlib import Path

docs/conf.py (27 changed lines)

@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Configuration file for the Sphinx documentation builder.
 #
@@ -14,22 +13,46 @@
 #

 import os
+import re
 import string
 from importlib.metadata import version
 from pathlib import Path

+from sphinx.application import Sphinx
+
 CURRENT_DIR = Path(__file__).parent


 def make_pypi_svg(version: str) -> None:
     template: Path = CURRENT_DIR / "_static" / "pypi_template.svg"
     target: Path = CURRENT_DIR / "_static" / "pypi.svg"
-    with open(str(template), "r", encoding="utf8") as f:
+    with open(str(template), encoding="utf8") as f:
         svg: str = string.Template(f.read()).substitute(version=version)
     with open(str(target), "w", encoding="utf8") as f:
         f.write(svg)


+def replace_pr_numbers_with_links(content: str) -> str:
+    """Replaces all PR numbers with the corresponding GitHub link."""
+    return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)
+
+
+def handle_include_read(
+    app: Sphinx,
+    relative_path: Path,
+    parent_docname: str,
+    content: list[str],
+) -> None:
+    """Handler for the include-read sphinx event."""
+    if parent_docname == "change_log":
+        content[0] = replace_pr_numbers_with_links(content[0])
+
+
+def setup(app: Sphinx) -> None:
+    """Sets up a minimal sphinx extension."""
+    app.connect("include-read", handle_include_read)
+
+
 # Necessary so Click doesn't hit an encode error when called by
 # sphinxcontrib-programoutput on Windows.
 os.putenv("pythonioencoding", "utf-8")

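The new `include-read` handler is plain string rewriting, so its core can be checked standalone. A small sketch that reuses the regex exactly as added above:

```python
import re

def replace_pr_numbers_with_links(content: str) -> str:
    # Replace "#1234" references with Markdown links to the matching PR.
    return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)

print(replace_pr_numbers_with_links("Rewrite tokenizer to improve performance (#4536)"))
# Rewrite tokenizer to improve performance ([#4536](https://github.com/psf/black/pull/4536))
```
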
docs/contributing/the_basics.md

@@ -7,7 +7,14 @@ An overview on contributing to the _Black_ project.
 Development on the latest version of Python is preferred. You can use any operating
 system.

-Install development dependencies inside a virtual environment of your choice, for
+First clone the _Black_ repository:
+
+```console
+$ git clone https://github.com/psf/black.git
+$ cd black
+```
+
+Then install development dependencies inside a virtual environment of your choice, for
 example:

 ```console
@@ -16,7 +23,7 @@ $ source .venv/bin/activate # activation for linux and mac
 $ .venv\Scripts\activate # activation for windows

 (.venv)$ pip install -r test_requirements.txt
-(.venv)$ pip install -e .[d]
+(.venv)$ pip install -e ".[d]"
 (.venv)$ pre-commit install
 ```
@@ -48,13 +55,16 @@ Further examples of invoking the tests
 # Run tests on a specific python version
 (.venv)$ tox -e py39

-# pass arguments to pytest
+# Run an individual test
+(.venv)$ pytest -k <test name>
+
+# Pass arguments to pytest
 (.venv)$ tox -e py -- --no-cov

-# print full tree diff, see documentation below
+# Print full tree diff, see documentation below
 (.venv)$ tox -e py -- --print-full-tree

-# disable diff printing, see documentation below
+# Disable diff printing, see documentation below
 (.venv)$ tox -e py -- --print-tree-diff=False
 ```
@@ -99,16 +109,22 @@ default. To turn it off pass `--print-tree-diff=False`.
 `Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If
 you feel this PR does not require a changelog entry please state that in a comment and a
 maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure you
-have a line in the following format:
+have a line in the following format added below the appropriate header:

 ```md
 - `Black` is now more awesome (#X)
 ```

+<!---
+The Next PR Number link uses HTML because of a bug in MyST-Parser that double-escapes the ampersand, causing the query parameters to not be processed.
+MyST-Parser issue: https://github.com/executablebooks/MyST-Parser/issues/760
+MyST-Parser stalled fix PR: https://github.com/executablebooks/MyST-Parser/pull/929
+-->
+
 Note that X should be your PR number, not issue number! To work out X, please use
-[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This
-is not perfect but saves a lot of release overhead as now the releaser does not need to
-go back and work out what to add to the `CHANGES.md` for each release.
+<a href="https://ichard26.github.io/next-pr-number/?owner=psf&name=black">Next PR
+Number</a>. This is not perfect but saves a lot of release overhead as now the releaser
+does not need to go back and work out what to add to the `CHANGES.md` for each release.

 ### Style Changes

@@ -116,7 +132,7 @@ If a change would affect the advertised code style, please modify the documentation (the
 _Black_ code style) to reflect that change. Patches that fix unintended bugs in
 formatting don't need to be mentioned separately though. If the change is implemented
 with the `--preview` flag, please include the change in the future style document
-instead and write the changelog entry under a dedicated "Preview changes" heading.
+instead and write the changelog entry under the dedicated "Preview style" heading.

 ### Docs Testing

@@ -124,17 +140,17 @@ If you make changes to docs, you can test they still build locally too.

 ```console
 (.venv)$ pip install -r docs/requirements.txt
-(.venv)$ pip install -e .[d]
+(.venv)$ pip install -e ".[d]"
 (.venv)$ sphinx-build -a -b html -W docs/ docs/_build/
 ```

 ## Hygiene

 If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug,
-run it again to confirm it's really fixed.
+and run the test again to confirm it's really fixed.

-If adding a new feature, add a test. In fact, always add a test. But wait, before adding
-any large feature, first open an issue for us to discuss the idea first.
+If adding a new feature, add a test. In fact, always add a test. If adding a large
+feature, please first open an issue to discuss it beforehand.

 ## Finally

docs/faq.md (13 changed lines)

@@ -84,16 +84,19 @@ See [Using _Black_ with other tools](labels/why-pycodestyle-warnings).

 ## Which Python versions does Black support?

-Currently the runtime requires Python 3.8-3.11. Formatting is supported for files
-containing syntax from Python 3.3 to 3.11. We promise to support at least all Python
-versions that have not reached their end of life. This is the case for both running
-_Black_ and formatting code.
+_Black_ generally supports all Python versions supported by CPython (see
+[the Python devguide](https://devguide.python.org/versions/) for current information).
+We promise to support at least all Python versions that have not reached their end of
+life. This is the case for both running _Black_ and formatting code.

 Support for formatting Python 2 code was removed in version 22.0. While we've made no
 plans to stop supporting older Python 3 minor versions immediately, their support might
 also be removed some time in the future without a deprecation period.

-Runtime support for 3.7 was removed in version 23.7.0.
+`await`/`async` as soft keywords/identifiers are no longer supported as of 25.2.0.
+
+Runtime support for 3.6 was removed in version 22.10.0, for 3.7 in version 23.7.0, and
+for 3.8 in version 24.10.0.

 ## Why does my linter or typechecker complain after I format my code?

docs/getting_started.md

@@ -16,7 +16,7 @@ Also, you can try out _Black_ online for minimal fuss on the

 ## Installation

-_Black_ can be installed by running `pip install black`. It requires Python 3.8+ to run.
+_Black_ can be installed by running `pip install black`. It requires Python 3.9+ to run.
 If you want to format Jupyter Notebooks, install with `pip install "black[jupyter]"`.

 If you use pipx, you can install Black with `pipx install black`.

docs/integrations/editors.md

@@ -236,7 +236,7 @@ Configuration:

 #### Installation

-This plugin **requires Vim 7.0+ built with Python 3.8+ support**. It needs Python 3.8 to
+This plugin **requires Vim 7.0+ built with Python 3.9+ support**. It needs Python 3.9 to
 be able to run _Black_ inside the Vim process which is much faster than calling an
 external command.

docs/integrations/github_actions.md

@@ -24,7 +24,7 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: psf/black@stable
 ```
@@ -37,10 +37,10 @@ the `pyproject.toml` file. `version` can be any
 [valid version specifier](https://packaging.python.org/en/latest/glossary/#term-Version-Specifier)
 or just the version number if you want an exact version. To read the version from the
 `pyproject.toml` file instead, set `use_pyproject` to `true`. This will first look into
-the `tool.black.required-version` field, then the `project.dependencies` array and
-finally the `project.optional-dependencies` table. The action defaults to the latest
-release available on PyPI. Only versions available from PyPI are supported, so no commit
-SHAs or branch names.
+the `tool.black.required-version` field, then the `dependency-groups` table, then the
+`project.dependencies` array and finally the `project.optional-dependencies` table. The
+action defaults to the latest release available on PyPI. Only versions available from
+PyPI are supported, so no commit SHAs or branch names.

 If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter`
 extra. Installing the extra and including Jupyter Notebook files can be configured via
@@ -74,9 +74,14 @@ If you want to match versions covered by Black's
   version: "~= 22.0"
 ```

-If you want to read the version from `pyproject.toml`, set `use_pyproject` to `true`:
+If you want to read the version from `pyproject.toml`, set `use_pyproject` to `true`.
+Note that this requires Python >= 3.11, so using the setup-python action may be
+required, for example:

 ```yaml
+- uses: actions/setup-python@v5
+  with:
+    python-version: "3.13"
 - uses: psf/black@stable
   with:
     options: "--check --verbose"

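For reference, the new lookup order maps onto a pyproject.toml like the following (a hypothetical sketch, not from the docs; any one of these locations can pin the version that `use_pyproject: true` picks up, checked in this order):

```toml
[tool.black]
required-version = "25.1.0"          # 1. tool.black.required-version

[dependency-groups]
dev = ["black==25.1.0"]              # 2. dependency-groups (new in this change)

[project]
dependencies = ["black==25.1.0"]     # 3. project.dependencies

[project.optional-dependencies]
lint = ["black==25.1.0"]             # 4. project.optional-dependencies
```
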
docs/integrations/source_version_control.md

@@ -8,7 +8,7 @@ Use [pre-commit](https://pre-commit.com/). Once you
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.1
+    rev: 25.1.0
     hooks:
       - id: black
         # It is recommended to specify the latest version of Python
@@ -35,7 +35,7 @@ include Jupyter Notebooks. To use this hook, simply replace the hook's `id: black`
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.1
+    rev: 25.1.0
     hooks:
       - id: black-jupyter
         # It is recommended to specify the latest version of Python

docs/requirements.txt

@@ -1,9 +1,9 @@
 # Used by ReadTheDocs; pinned requirements for stability.

-myst-parser==2.0.0
-Sphinx==7.3.7
+myst-parser==4.0.1
+Sphinx==8.2.3
 # Older versions break Sphinx even though they're declared to be supported.
-docutils==0.20.1
-sphinxcontrib-programoutput==0.17
+docutils==0.21.2
+sphinxcontrib-programoutput==0.18
 sphinx_copybutton==0.5.2
-furo==2024.1.29
+furo==2024.8.6

docs/the_black_code_style/current_style.md

@@ -250,6 +250,11 @@ exception of [capital "R" prefixes](#rstrings-and-rstrings), unicode literal markers
 (`u`) are removed because they are meaningless in Python 3, and in the case of multiple
 characters "r" is put first as in spoken language: "raw f-string".

+Another area where Python allows multiple ways to format a string is escape sequences.
+For example, `"\uabcd"` and `"\uABCD"` evaluate to the same string. _Black_ normalizes
+such escape sequences to lowercase, but uses uppercase for `\N` named character escapes,
+such as `"\N{MEETEI MAYEK LETTER HUK}"`.
+
 The main reason to standardize on a single form of quotes is aesthetics. Having one kind
 of quotes everywhere reduces reader distraction. It will also enable a future version of
 _Black_ to merge consecutive string literals that ended up on the same line (see

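Illustrating the paragraph added above (a hand-written before/after sketch, not taken from the docs):

```python
# Before formatting
s1 = "\uABCD"                        # hex escape with uppercase digits
s2 = "\N{MEETEI MAYEK LETTER HUK}"   # named character escape

# After Black
s1 = "\uabcd"                        # hex escape digits normalized to lowercase
s2 = "\N{MEETEI MAYEK LETTER HUK}"   # \N named escapes keep the uppercase N
```
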
docs/the_black_code_style/future_style.md

@@ -2,6 +2,8 @@

 ## Preview style

+(labels/preview-style)=
+
 Experimental, potentially disruptive style changes are gathered under the `--preview`
 CLI flag. At the end of each year, these changes may be adopted into the default style,
 as described in [The Black Code Style](index.md). Because the functionality is
@@ -20,22 +22,13 @@ demoted from the `--preview` to the `--unstable` style, users can use the
 Currently, the following features are included in the preview style:

-- `hex_codes_in_unicode_sequences`: normalize casing of Unicode escape characters in
-  strings
-- `unify_docstring_detection`: fix inconsistencies in whether certain strings are
-  detected as docstrings
-- `no_normalize_fmt_skip_whitespace`: whitespace before `# fmt: skip` comments is no
-  longer normalized
-- `typed_params_trailing_comma`: consistently add trailing commas to typed function
-  parameters
-- `is_simple_lookup_for_doublestar_expression`: fix line length computation for certain
-  expressions that involve the power operator
-- `docstring_check_for_newline`: checks if there is a newline before the terminating
-  quotes of a docstring
-- `remove_redundant_guard_parens`: Removes redundant parentheses in `if` guards for
-  `case` blocks.
-- `parens_for_long_if_clauses_in_case_block`: Adds parentheses to `if` clauses in `case`
-  blocks when the line is too long
+- `always_one_newline_after_import`: Always force one blank line after import
+  statements, except when the line after the import is a comment or an import statement
+- `wrap_long_dict_values_in_parens`: Add parentheses around long values in dictionaries
+  ([see below](labels/wrap-long-dict-values))
+- `fix_fmt_skip_in_one_liners`: Fix `# fmt: skip` behaviour on one-liner declarations,
+  such as `def foo(): return "mock"  # fmt: skip`, where previously the declaration
+  would have been incorrectly collapsed.

 (labels/unstable-features)=
@@ -43,13 +36,38 @@ The unstable style additionally includes the following features:

 - `string_processing`: split long string literals and related changes
   ([see below](labels/string-processing))
-- `wrap_long_dict_values_in_parens`: add parentheses to long values in dictionaries
-  ([see below](labels/wrap-long-dict-values))
 - `multiline_string_handling`: more compact formatting of expressions involving
   multiline strings ([see below](labels/multiline-string-handling))
 - `hug_parens_with_braces_and_square_brackets`: more compact formatting of nested
   brackets ([see below](labels/hug-parens))

+(labels/wrap-long-dict-values)=
+
+### Improved parentheses management in dicts
+
+For dict literals with long values, they are now wrapped in parentheses. Unnecessary
+parentheses are now removed. For example:
+
+```python
+my_dict = {
+    "a key in my dict": a_very_long_variable
+    * and_a_very_long_function_call()
+    / 100000.0,
+    "another key": (short_value),
+}
+```
+
+will be changed to:
+
+```python
+my_dict = {
+    "a key in my dict": (
+        a_very_long_variable * and_a_very_long_function_call() / 100000.0
+    ),
+    "another key": short_value,
+}
+```
+
 (labels/hug-parens)=

 ### Improved multiline dictionary and list indentation for sole function parameter
@@ -130,37 +148,11 @@ foo(

 _Black_ will split long string literals and merge short ones. Parentheses are used where
 appropriate. When split, parts of f-strings that don't need formatting are converted to
-plain strings. User-made splits are respected when they do not exceed the line length
-limit. Line continuation backslashes are converted into parenthesized strings.
-Unnecessary parentheses are stripped. The stability and status of this feature is
-tracked in [this issue](https://github.com/psf/black/issues/2188).
-
-(labels/wrap-long-dict-values)=
-
-### Improved parentheses management in dicts
-
-For dict literals with long values, they are now wrapped in parentheses. Unnecessary
-parentheses are now removed. For example:
-
-```python
-my_dict = {
-    "a key in my dict": a_very_long_variable
-    * and_a_very_long_function_call()
-    / 100000.0,
-    "another key": (short_value),
-}
-```
-
-will be changed to:
-
-```python
-my_dict = {
-    "a key in my dict": (
-        a_very_long_variable * and_a_very_long_function_call() / 100000.0
-    ),
-    "another key": short_value,
-}
-```
+plain strings. f-strings will not be merged if they contain internal quotes and it would
+change their quotation mark style. User-made splits are respected when they do not
+exceed the line length limit. Line continuation backslashes are converted into
+parenthesized strings. Unnecessary parentheses are stripped. The stability and status of
+this feature is tracked in [this issue](https://github.com/psf/black/issues/2188).

 (labels/multiline-string-handling)=
@@ -275,52 +267,3 @@ s = ( # Top comment
 # Bottom comment
 )
 ```
-
-## Potential future changes
-
-This section lists changes that we may want to make in the future, but that aren't
-implemented yet.
-
-### Using backslashes for with statements
-
-[Backslashes are bad and should be never be used](labels/why-no-backslashes) however
-there is one exception: `with` statements using multiple context managers. Before Python
-3.9 Python's grammar does not allow organizing parentheses around the series of context
-managers.
-
-We don't want formatting like:
-
-```py3
-with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4:
-    ...  # nothing to split on - line too long
-```
-
-So _Black_ will, when we implement this, format it like this:
-
-```py3
-with \
-     make_context_manager1() as cm1, \
-     make_context_manager2() as cm2, \
-     make_context_manager3() as cm3, \
-     make_context_manager4() as cm4 \
-:
-    ...  # backslashes and an ugly stranded colon
-```
-
-Although when the target version is Python 3.9 or higher, _Black_ uses parentheses
-instead in `--preview` mode (see below) since they're allowed in Python 3.9 and higher.
-
-An alternative to consider if the backslashes in the above formatting are undesirable is
-to use {external:py:obj}`contextlib.ExitStack` to combine context managers in the
-following way:
-
-```python
-with contextlib.ExitStack() as exit_stack:
-    cm1 = exit_stack.enter_context(make_context_manager1())
-    cm2 = exit_stack.enter_context(make_context_manager2())
-    cm3 = exit_stack.enter_context(make_context_manager3())
-    cm4 = exit_stack.enter_context(make_context_manager4())
-    ...
-```
-
-(labels/preview-style)=

docs/usage_and_configuration/black_docker_image.md

@@ -8,16 +8,16 @@ _Black_ images with the following tags are available:
 - release numbers, e.g. `21.5b2`, `21.6b0`, `21.7b0` etc.\
   ℹ Recommended for users who want to use a particular version of _Black_.
 - `latest_release` - tag created when a new version of _Black_ is released.\
-  ℹ Recommended for users who want to use released versions of _Black_. It maps to [the latest release](https://github.com/psf/black/releases/latest)
-  of _Black_.
+  ℹ Recommended for users who want to use released versions of _Black_. It maps to
+  [the latest release](https://github.com/psf/black/releases/latest) of _Black_.
 - `latest_prerelease` - tag created when a new alpha (prerelease) version of _Black_ is
   released.\
-  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note that
-  the most recent release may be newer than any prerelease, because no prereleases are created
-  before most releases.
+  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note
+  that the most recent release may be newer than any prerelease, because no prereleases
+  are created before most releases.
 - `latest` - tag used for the newest image of _Black_.\
-  ℹ Recommended for users who always want to use the latest version of _Black_, even before
-  it is released.
+  ℹ Recommended for users who always want to use the latest version of _Black_, even
+  before it is released.

 There is one more tag used for _Black_ Docker images - `latest_non_release`. It is
 created for all unreleased

docs/usage_and_configuration/the_basics.md

@@ -70,17 +70,17 @@ See also [the style documentation](labels/line-length).
 Python versions that should be supported by Black's output. You can run `black --help`
 and look for the `--target-version` option to see the full list of supported versions.
-You should include all versions that your code supports. If you support Python 3.8
-through 3.11, you should write:
+You should include all versions that your code supports. If you support Python 3.11
+through 3.13, you should write:

 ```console
-$ black -t py38 -t py39 -t py310 -t py311
+$ black -t py311 -t py312 -t py313
 ```

 In a [configuration file](#configuration-via-a-file), you can write:

 ```toml
-target-version = ["py38", "py39", "py310", "py311"]
+target-version = ["py311", "py312", "py313"]
 ```

 By default, Black will infer target versions from the project metadata in
@@ -269,8 +269,8 @@ configuration file for consistent results across environments.

 ```console
 $ black --version
-black, 24.4.1 (compiled: yes)
-$ black --required-version 24.4.1 -c "format = 'this'"
+black, 25.1.0 (compiled: yes)
+$ black --required-version 25.1.0 -c "format = 'this'"
 format = "this"
 $ black --required-version 31.5b2 -c "still = 'beta?!'"
 Oh no! 💥 💔 💥 The required version does not match the running version!
@@ -366,7 +366,7 @@ You can check the version of _Black_ you have installed using the `--version` flag

 ```console
 $ black --version
-black, 24.4.1
+black, 25.1.0
 ```

 #### `--config`
@@ -478,9 +478,10 @@ operating system, this configuration file should be stored as:
 `XDG_CONFIG_HOME` environment variable is not set)

 Note that these are paths to the TOML file itself (meaning that they shouldn't be named
-as `pyproject.toml`), not directories where you store the configuration. Here, `~`
-refers to the path to your home directory. On Windows, this will be something like
-`C:\\Users\UserName`.
+as `pyproject.toml`), not directories where you store the configuration (i.e.,
+`black`/`.black` is the file to create and add your configuration options to, in the
+`~/.config/` directory). Here, `~` refers to the path to your home directory. On
+Windows, this will be something like `C:\\Users\UserName`.

 You can also explicitly specify the path to a particular file that you want with
 `--config`. In this situation _Black_ will not look for any other file.

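So a user-level configuration file would look like a regular Black config living at, e.g., `~/.config/black`. A hypothetical sketch, assuming the file is parsed the same way as a `pyproject.toml` (options under `[tool.black]`, mirroring the command-line flags):

```toml
# ~/.config/black  (the file itself, not a directory)
[tool.black]
line-length = 88
target-version = ["py311", "py312", "py313"]
```
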
gallery/gallery.py

@@ -7,15 +7,16 @@
 import venv
 import zipfile
 from argparse import ArgumentParser, Namespace
+from collections.abc import Generator
 from concurrent.futures import ThreadPoolExecutor
 from functools import lru_cache, partial
 from pathlib import Path
-from typing import Generator, List, NamedTuple, Optional, Tuple, Union, cast
+from typing import NamedTuple, Optional, Union, cast
 from urllib.request import urlopen, urlretrieve

 PYPI_INSTANCE = "https://pypi.org/pypi"
 PYPI_TOP_PACKAGES = (
-    "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-30-days.min.json"
+    "https://hugovk.github.io/top-pypi-packages/top-pypi-packages.min.json"
 )
 INTERNAL_BLACK_REPO = f"{tempfile.gettempdir()}/__black"
@@ -54,7 +55,7 @@ def get_pypi_download_url(package: str, version: Optional[str]) -> str:
     return cast(str, source["url"])


-def get_top_packages() -> List[str]:
+def get_top_packages() -> list[str]:
     with urlopen(PYPI_TOP_PACKAGES) as page:
         result = json.load(page)

@@ -150,7 +151,7 @@ def git_switch_branch(
     subprocess.run(args, cwd=repo)


-def init_repos(options: Namespace) -> Tuple[Path, ...]:
+def init_repos(options: Namespace) -> tuple[Path, ...]:
     options.output.mkdir(exist_ok=True)

     if options.top_packages:
@@ -206,7 +207,7 @@ def format_repo_with_version(
     git_switch_branch(black_version.version, repo=black_repo)
     git_switch_branch(current_branch, repo=repo, new=True, from_branch=from_branch)

-    format_cmd: List[Union[Path, str]] = [
+    format_cmd: list[Union[Path, str]] = [
         black_runner(black_version.version, black_repo),
         (black_repo / "black.py").resolve(),
         ".",
@@ -222,7 +223,7 @@ def format_repo_with_version(
     return current_branch


-def format_repos(repos: Tuple[Path, ...], options: Namespace) -> None:
+def format_repos(repos: tuple[Path, ...], options: Namespace) -> None:
     black_versions = tuple(
         BlackVersion(*version.split(":")) for version in options.versions
     )

plugin/black.vim

@@ -21,7 +21,7 @@ endif

 if v:version < 700 || !has('python3')
   func! __BLACK_MISSING()
-    echo "The black.vim plugin requires vim7.0+ with Python 3.6 support."
+    echo "The black.vim plugin requires vim7.0+ with Python 3.9 support."
   endfunc
   command! Black :call __BLACK_MISSING()
   command! BlackUpgrade :call __BLACK_MISSING()
@@ -72,12 +72,11 @@ endif

 function BlackComplete(ArgLead, CmdLine, CursorPos)
   return [
-  \ 'target_version=py27',
-  \ 'target_version=py36',
-  \ 'target_version=py37',
-  \ 'target_version=py38',
   \ 'target_version=py39',
+  \ 'target_version=py310',
+  \ 'target_version=py311',
+  \ 'target_version=py312',
+  \ 'target_version=py313',
   \ ]
 endfunction

@@ -7,14 +7,15 @@

 [tool.black]
 line-length = 88
-target-version = ['py38']
+target-version = ['py39']
 include = '\.pyi?$'
 extend-exclude = '''
 /(
   # The following are specific to Black, you probably don't want those.
-  tests/data
-  | profiling
-)/
+  tests/data/
+  | profiling/
+  | scripts/generate_schema.py  # Uses match syntax
+)
 '''
 # We use the unstable style for formatting Black itself. If you
 # want bug-free formatting, you should keep this off. If you want
@@ -32,8 +33,8 @@ build-backend = "hatchling.build"
 [project]
 name = "black"
 description = "The uncompromising code formatter."
-license = { text = "MIT" }
-requires-python = ">=3.8"
+license = "MIT"
+requires-python = ">=3.9"
 authors = [
   { name = "Łukasz Langa", email = "lukasz@langa.pl" },
 ]
@@ -54,11 +55,11 @@ classifiers = [
   "Operating System :: OS Independent",
   "Programming Language :: Python",
   "Programming Language :: Python :: 3 :: Only",
-  "Programming Language :: Python :: 3.8",
   "Programming Language :: Python :: 3.9",
   "Programming Language :: Python :: 3.10",
   "Programming Language :: Python :: 3.11",
   "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: 3.13",
   "Topic :: Software Development :: Libraries :: Python Modules",
   "Topic :: Software Development :: Quality Assurance",
 ]
@@ -68,6 +69,7 @@ dependencies = [
   "packaging>=22.0",
   "pathspec>=0.9.0",
   "platformdirs>=2",
+  "pytokens>=0.1.10",
   "tomli>=1.1.0; python_version < '3.11'",
   "typing_extensions>=4.0.1; python_version < '3.11'",
 ]
@@ -76,10 +78,7 @@ dynamic = ["readme", "version"]
 [project.optional-dependencies]
 colorama = ["colorama>=0.4.3"]
 uvloop = ["uvloop>=0.15.2"]
-d = [
-  "aiohttp>=3.7.4; sys_platform != 'win32' or implementation_name != 'pypy'",
-  "aiohttp>=3.7.4, !=3.9.0; sys_platform == 'win32' and implementation_name == 'pypy'",
-]
+d = ["aiohttp>=3.10"]
 jupyter = [
   "ipython>=7.8.0",
   "tokenize-rt>=3.2.0",
@@ -93,8 +92,10 @@ blackd = "blackd:patched_main [d]"
 black = "black.schema:get_schema"

 [project.urls]
+Documentation = "https://black.readthedocs.io/"
 Changelog = "https://github.com/psf/black/blob/main/CHANGES.md"
-Homepage = "https://github.com/psf/black"
+Repository = "https://github.com/psf/black"
+Issues = "https://github.com/psf/black/issues"

 [tool.hatch.metadata.hooks.fancy-pypi-readme]
 content-type = "text/markdown"
@@ -125,8 +126,8 @@ macos-max-compat = true
 enable-by-default = false
 dependencies = [
   "hatch-mypyc>=0.16.0",
-  "mypy==1.7.1",
-  "click==8.1.3", # avoid https://github.com/pallets/click/issues/2558
+  "mypy>=1.12",
+  "click>=8.1.7",
 ]
 require-runtime-dependencies = true
 exclude = [
@@ -149,12 +150,14 @@ options = { debug_level = "0" }

 [tool.cibuildwheel]
 build-verbosity = 1

 # So these are the environments we target:
-# - Python: CPython 3.8+ only
+# - Python: CPython 3.9+ only
 # - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64
 # - OS: Linux (no musl), Windows, and macOS
 build = "cp3*"
 skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp*"]

 # This is the bare minimum needed to run the test suite. Pulling in the full
 # test_requirements.txt would download a bunch of other packages not necessary
 # here and would slow down the testing step a fair bit.
@@ -169,11 +172,9 @@ test-skip = ["*-macosx_arm64", "*-macosx_universal2:arm64"]
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension
-# from source also doesn't work.
 AIOHTTP_NO_EXTENSIONS = "1"

 [tool.cibuildwheel.linux]
 manylinux-x86_64-image = "manylinux_2_28"
 before-build = [
     "yum install -y clang gcc",
 ]
@@ -182,19 +183,10 @@ before-build = [
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-AIOHTTP_NO_EXTENSIONS = "1"

 # Black needs Clang to compile successfully on Linux.
 CC = "clang"

-[tool.cibuildwheel.macos]
-build-frontend = { name = "build", args = ["--no-isolation"] }
-# Unfortunately, hatch doesn't respect MACOSX_DEPLOYMENT_TARGET
-before-build = [
-    "python -m pip install 'hatchling==1.20.0' hatch-vcs hatch-fancy-pypi-readme 'hatch-mypyc>=0.16.0' 'mypy==1.7.1' 'click==8.1.3'",
-    """sed -i '' -e "600,700s/'10_16'/os.environ['MACOSX_DEPLOYMENT_TARGET'].replace('.', '_')/" $(python -c 'import hatchling.builders.wheel as h; print(h.__file__)') """,
-]
-
 [tool.isort]
 atomic = true
 profile = "black"
@@ -214,23 +206,7 @@ markers = [
     "incompatible_with_mypyc: run when testing mypyc compiled black"
 ]
 xfail_strict = true
-filterwarnings = [
-    "error",
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/2974/
-    # this ignore can be removed when support for aiohttp 3.7 is dropped.
-    '''ignore:Decorator `@unittest_run_loop` is no longer needed in aiohttp 3\.8\+:DeprecationWarning''',
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/
-    # this ignore can be removed when support for aiohttp 3.x is dropped.
-    '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''',
-    # aiohttp is using deprecated cgi modules - Safe to remove when fixed:
-    # https://github.com/aio-libs/aiohttp/issues/6905
-    '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''',
-    # Work around https://github.com/pytest-dev/pytest/issues/10977 for Python 3.12
-    '''ignore:(Attribute s|Attribute n|ast.Str|ast.Bytes|ast.NameConstant|ast.Num) is deprecated and will be removed in Python 3.14:DeprecationWarning''',
-    # Will be fixed with aiohttp 3.9.0
-    # https://github.com/aio-libs/aiohttp/pull/7302
-    "ignore:datetime.*utcfromtimestamp\\(\\) is deprecated and scheduled for removal:DeprecationWarning",
-]
+filterwarnings = ["error"]
 [tool.coverage.report]
 omit = [
     "src/blib2to3/*",
@@ -246,9 +222,11 @@ branch = true
 # Specify the target platform details in config, so your developers are
 # free to run mypy on Windows, Linux, or macOS and get consistent
 # results.
-python_version = "3.8"
+python_version = "3.9"
 mypy_path = "src"
 strict = true
+strict_bytes = true
+local_partial_types = true
 # Unreachable blocks have been an issue when compiling mypyc, let's try to avoid 'em in the first place.
 warn_unreachable = true
 implicit_reexport = true
@@ -24,17 +24,12 @@
 from base64 import b64encode
 from io import BytesIO
 from pathlib import Path
-from typing import Any
+from typing import Any, Final, Literal

 import click
 import urllib3
 from packaging.version import Version

-if sys.version_info >= (3, 8):
-    from typing import Final, Literal
-else:
-    from typing_extensions import Final, Literal
-
 COMMENT_FILE: Final = ".pr-comment.json"
 DIFF_STEP_NAME: Final = "Generate HTML diff report"
 DOCS_URL: Final = (
@@ -5,14 +5,11 @@
 a coverage-guided fuzzer I'm working on.
 """

-import re
-
 import hypothesmith
 from hypothesis import HealthCheck, given, settings
 from hypothesis import strategies as st

 import black
-from blib2to3.pgen2.tokenize import TokenError


 # This test uses the Hypothesis and Hypothesmith libraries to generate random
@@ -45,23 +42,7 @@ def test_idempotent_any_syntatically_valid_python(
     compile(src_contents, "<string>", "exec")  # else the bug is in hypothesmith

     # Then format the code...
-    try:
-        dst_contents = black.format_str(src_contents, mode=mode)
-    except black.InvalidInput:
-        # This is a bug - if it's valid Python code, as above, Black should be
-        # able to cope with it. See issues #970, #1012
-        # TODO: remove this try-except block when issues are resolved.
-        return
-    except TokenError as e:
-        if (  # Special-case logic for backslashes followed by newlines or end-of-input
-            e.args[0] == "EOF in multi-line statement"
-            and re.search(r"\\($|\r?\n)", src_contents) is not None
-        ):
-            # This is a bug - if it's valid Python code, as above, Black should be
-            # able to cope with it. See issue #1012.
-            # TODO: remove this block when the issue is resolved.
-            return
-        raise
+    dst_contents = black.format_str(src_contents, mode=mode)

     # And check that we got equivalent and stable output.
     black.assert_equivalent(src_contents, dst_contents)
@@ -80,7 +61,7 @@ def test_idempotent_any_syntatically_valid_python(
 try:
     import sys

-    import atheris  # type: ignore[import-not-found]
+    import atheris
 except ImportError:
     pass
 else:
@@ -53,7 +53,7 @@ def main(schemastore: bool, outfile: IO[str]) -> None:
     schema: dict[str, Any] = {
         "$schema": "http://json-schema.org/draft-07/schema#",
         "$id": (
-            "https://github.com/psf/black/blob/main/black/resources/black.schema.json"
+            "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json"
         ),
         "$comment": "tool.black table in pyproject.toml",
         "type": "object",
@@ -17,13 +17,13 @@
 """

 import sys
+from collections.abc import Iterable
 from os.path import basename, dirname, join
-from typing import Iterable, Tuple

 import wcwidth  # type: ignore[import-not-found]


-def make_width_table() -> Iterable[Tuple[int, int, int]]:
+def make_width_table() -> Iterable[tuple[int, int, int]]:
     start_codepoint = -1
     end_codepoint = -1
     range_width = -2
@@ -53,9 +53,9 @@ def main() -> None:
         f.write(f"""# Generated by {basename(__file__)}
 # wcwidth {wcwidth.__version__}
 # Unicode {wcwidth.list_versions()[-1]}
-from typing import Final, List, Tuple
+from typing import Final

-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
 """)
         for triple in make_width_table():
             f.write(f"    {triple!r},\n")
@@ -9,7 +9,7 @@


 def git(*args: str) -> str:
-    return check_output(["git"] + list(args)).decode("utf8").strip()
+    return check_output(["git", *args]).decode("utf8").strip()


 def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
@@ -26,19 +26,19 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
     merge_base = git("merge-base", "HEAD", base_branch)
     if not merge_base:
         logger.error(
-            "Could not find a common commit for current head and %s" % base_branch
+            f"Could not find a common commit for current head and {base_branch}"
         )
         return 1

     commits = git(
-        "log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base
+        "log", "--reverse", "--pretty=format:%H", f"{merge_base}~1..HEAD"
     ).split()
     for commit in commits:
-        git("checkout", commit, "-b%s-black" % commit)
+        git("checkout", commit, f"-b{commit}-black")
         check_output(black_command, shell=True)
         git("commit", "-aqm", "blackify")

-    git("checkout", base_branch, "-b%s-black" % current_branch)
+    git("checkout", base_branch, f"-b{current_branch}-black")

     for last_commit, commit in zip(commits, commits[1:]):
         allow_empty = (
@@ -51,7 +51,7 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
                 "diff",
                 "--binary",
                 "--find-copies",
-                "%s-black..%s-black" % (last_commit, commit),
+                f"{last_commit}-black..{commit}-black",
             ],
             stdout=PIPE,
         )
@@ -77,7 +77,7 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
         git("commit", "--allow-empty", "-aqC", commit)

     for commit in commits:
-        git("branch", "-qD", "%s-black" % commit)
+        git("branch", "-qD", f"{commit}-black")

     return 0
@@ -11,8 +11,7 @@
 import sys
 from datetime import datetime
 from pathlib import Path
-from subprocess import PIPE, run
-from typing import List
+from subprocess import run

 LOG = logging.getLogger(__name__)
 NEW_VERSION_CHANGELOG_TEMPLATE = """\
@@ -70,9 +69,9 @@ class NoGitTagsError(Exception): ...  # noqa: E701,E761

 # TODO: Do better with alpha + beta releases
 # Maybe we vendor packaging library
-def get_git_tags(versions_only: bool = True) -> List[str]:
+def get_git_tags(versions_only: bool = True) -> list[str]:
     """Pull out all tags or calvers only"""
-    cp = run(["git", "tag"], stdout=PIPE, stderr=PIPE, check=True, encoding="utf8")
+    cp = run(["git", "tag"], capture_output=True, check=True, encoding="utf8")
     if not cp.stdout:
         LOG.error(f"Returned no git tags stdout: {cp.stderr}")
         raise NoGitTagsError
@@ -5,28 +5,22 @@
 import sys
 import tokenize
 import traceback
+from collections.abc import (
+    Collection,
+    Generator,
+    Iterator,
+    MutableMapping,
+    Sequence,
+    Sized,
+)
 from contextlib import contextmanager
 from dataclasses import replace
 from datetime import datetime, timezone
 from enum import Enum
 from json.decoder import JSONDecodeError
 from pathlib import Path
-from typing import (
-    Any,
-    Collection,
-    Dict,
-    Generator,
-    Iterator,
-    List,
-    MutableMapping,
-    Optional,
-    Pattern,
-    Sequence,
-    Set,
-    Sized,
-    Tuple,
-    Union,
-)
+from re import Pattern
+from typing import Any, Optional, Union

 import click
 from click.core import ParameterSource
@@ -57,12 +51,12 @@
 )
 from black.handle_ipynb_magics import (
     PYTHON_CELL_MAGICS,
-    TRANSFORMED_MAGICS,
     jupyter_dependencies_are_installed,
     mask_cell,
     put_trailing_semicolon_back,
     remove_trailing_semicolon,
     unmask_cell,
+    validate_cell,
 )
 from black.linegen import LN, LineGenerator, transform_line
 from black.lines import EmptyLineTracker, LinesBlock
@@ -176,7 +170,7 @@ def read_pyproject_toml(
             "line-ranges", "Cannot use line-ranges in the pyproject.toml file."
         )

-    default_map: Dict[str, Any] = {}
+    default_map: dict[str, Any] = {}
     if ctx.default_map:
         default_map.update(ctx.default_map)
     default_map.update(config)
@@ -186,9 +180,9 @@ def read_pyproject_toml(


 def spellcheck_pyproject_toml_keys(
-    ctx: click.Context, config_keys: List[str], config_file_path: str
+    ctx: click.Context, config_keys: list[str], config_file_path: str
 ) -> None:
-    invalid_keys: List[str] = []
+    invalid_keys: list[str] = []
     available_config_options = {param.name for param in ctx.command.params}
     for key in config_keys:
         if key not in available_config_options:
@@ -202,8 +196,8 @@ def spellcheck_pyproject_toml_keys(


 def target_version_option_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[TargetVersion]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[TargetVersion]:
     """Compute the target versions from a --target-version flag.

     This is its own function because mypy couldn't infer the type correctly
@@ -213,8 +207,8 @@ def target_version_option_callback(


 def enable_unstable_feature_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[Preview]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[Preview]:
     """Compute the features from an --enable-unstable-feature flag."""
     return [Preview[val] for val in v]

@@ -519,7 +513,7 @@ def main(  # noqa: C901
     ctx: click.Context,
     code: Optional[str],
     line_length: int,
-    target_version: List[TargetVersion],
+    target_version: list[TargetVersion],
     check: bool,
     diff: bool,
     line_ranges: Sequence[str],
@@ -533,7 +527,7 @@ def main(  # noqa: C901
     skip_magic_trailing_comma: bool,
     preview: bool,
     unstable: bool,
-    enable_unstable_feature: List[Preview],
+    enable_unstable_feature: list[Preview],
     quiet: bool,
     verbose: bool,
     required_version: Optional[str],
@@ -543,12 +537,21 @@ def main(  # noqa: C901
     force_exclude: Optional[Pattern[str]],
     stdin_filename: Optional[str],
     workers: Optional[int],
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     config: Optional[str],
 ) -> None:
     """The uncompromising code formatter."""
     ctx.ensure_object(dict)

+    assert sys.version_info >= (3, 9), "Black requires Python 3.9+"
+    if sys.version_info[:3] == (3, 12, 5):
+        out(
+            "Python 3.12.5 has a memory safety issue that can cause Black's "
+            "AST safety checks to fail. "
+            "Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4"
+        )
+        ctx.exit(1)
+
     if src and code is not None:
         out(
             main.get_usage(ctx)
@@ -634,7 +637,7 @@ def main(  # noqa: C901
         enabled_features=set(enable_unstable_feature),
     )

-    lines: List[Tuple[int, int]] = []
+    lines: list[tuple[int, int]] = []
     if line_ranges:
         if ipynb:
             err("Cannot use --line-ranges with ipynb files.")
@@ -724,7 +727,7 @@ def main(  # noqa: C901
 def get_sources(
     *,
     root: Path,
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     quiet: bool,
     verbose: bool,
     include: Pattern[str],
@@ -733,19 +736,25 @@ def get_sources(
     force_exclude: Optional[Pattern[str]],
     report: "Report",
     stdin_filename: Optional[str],
-) -> Set[Path]:
+) -> set[Path]:
     """Compute the set of files to be formatted."""
-    sources: Set[Path] = set()
+    sources: set[Path] = set()

     assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
     using_default_exclude = exclude is None
     exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
-    gitignore: Optional[Dict[Path, PathSpec]] = None
+    gitignore: Optional[dict[Path, PathSpec]] = None
     root_gitignore = get_gitignore(root)

     for s in src:
         if s == "-" and stdin_filename:
             path = Path(stdin_filename)
+            if path_is_excluded(stdin_filename, force_exclude):
+                report.path_ignored(
+                    path,
+                    "--stdin-filename matches the --force-exclude regular expression",
+                )
+                continue
             is_stdin = True
         else:
             path = Path(s)
@@ -832,7 +841,7 @@ def reformat_code(
     mode: Mode,
     report: Report,
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """
     Reformat and print out `content` without spawning child processes.
@@ -865,7 +874,7 @@ def reformat_one(
     mode: Mode,
     report: "Report",
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Reformat a single file under `src` without spawning child processes.

@@ -921,7 +930,7 @@ def format_file_in_place(
     write_back: WriteBack = WriteBack.NO,
     lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file under `src` path. Return True if changed.

@@ -988,7 +997,7 @@ def format_stdin_to_stdout(
     content: Optional[str] = None,
     write_back: WriteBack = WriteBack.NO,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file on stdin. Return True if changed.

@@ -1039,7 +1048,7 @@ def check_stability_and_equivalence(
     dst_contents: str,
     *,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Perform stability and equivalence checks.

@@ -1056,7 +1065,7 @@ def format_file_contents(
     *,
     fast: bool,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> FileContent:
     """Reformat contents of a file and return new contents.

@@ -1079,32 +1088,6 @@ def format_file_contents(
     return dst_contents


-def validate_cell(src: str, mode: Mode) -> None:
-    """Check that cell does not already contain TransformerManager transformations,
-    or non-Python cell magics, which might cause tokenizer_rt to break because of
-    indentations.
-
-    If a cell contains ``!ls``, then it'll be transformed to
-    ``get_ipython().system('ls')``. However, if the cell originally contained
-    ``get_ipython().system('ls')``, then it would get transformed in the same way:
-
-        >>> TransformerManager().transform_cell("get_ipython().system('ls')")
-        "get_ipython().system('ls')\n"
-        >>> TransformerManager().transform_cell("!ls")
-        "get_ipython().system('ls')\n"
-
-    Due to the impossibility of safely roundtripping in such situations, cells
-    containing transformed magics will be ignored.
-    """
-    if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
-        raise NothingChanged
-    if (
-        src[:2] == "%%"
-        and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics
-    ):
-        raise NothingChanged
-
-
 def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
     """Format code in given cell of Jupyter notebook.

@@ -1187,7 +1170,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:


 def format_str(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     """Reformat a string and return new contents.

@@ -1234,10 +1217,10 @@ def f(


 def _format_str_once(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
-    dst_blocks: List[LinesBlock] = []
+    dst_blocks: list[LinesBlock] = []
     if mode.target_versions:
         versions = mode.target_versions
     else:
@@ -1287,7 +1270,7 @@ def _format_str_once(
     return "".join(dst_contents)


-def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:
     """Return a tuple of (decoded_contents, encoding, newline).

     `newline` is either CRLF or LF but `decoded_contents` is decoded with
@@ -1305,8 +1288,8 @@ def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:


 def get_features_used(  # noqa: C901
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[Feature]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[Feature]:
     """Return a set of (relatively) new Python features used in this file.

     Currently looking for:
@@ -1324,7 +1307,7 @@ def get_features_used(  # noqa: C901
     - except* clause;
     - variadic generics;
     """
-    features: Set[Feature] = set()
+    features: set[Feature] = set()
     if future_imports:
         features |= {
             FUTURE_FLAG_TO_FEATURE[future_import]
@@ -1462,8 +1445,8 @@ def _contains_asexpr(node: Union[Node, Leaf]) -> bool:


 def detect_target_versions(
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[TargetVersion]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[TargetVersion]:
     """Detect the version to target based on the nodes used."""
     features = get_features_used(node, future_imports=future_imports)
     return {
@@ -1471,11 +1454,11 @@ def detect_target_versions(
     }


-def get_future_imports(node: Node) -> Set[str]:
+def get_future_imports(node: Node) -> set[str]:
     """Return a set of __future__ imports in the file."""
-    imports: Set[str] = set()
+    imports: set[str] = set()

-    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+    def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
         for child in children:
             if isinstance(child, Leaf):
                 if child.type == token.NAME:
@@ -1521,6 +1504,13 @@ def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
     return imports


+def _black_info() -> str:
+    return (
+        f"Black {__version__} on "
+        f"Python ({platform.python_implementation()}) {platform.python_version()}"
+    )
+
+
 def assert_equivalent(src: str, dst: str) -> None:
     """Raise AssertionError if `src` and `dst` aren't equivalent."""
     try:
@@ -1538,7 +1528,7 @@ def assert_equivalent(src: str, dst: str) -> None:
     except Exception as exc:
         log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
         raise ASTSafetyError(
-            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
+            f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. "
             "Please report a bug on https://github.com/psf/black/issues. "
             f"This invalid output might be helpful: {log}"
         ) from None
@@ -1548,14 +1538,14 @@ def assert_equivalent(src: str, dst: str) -> None:
     if src_ast_str != dst_ast_str:
         log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
         raise ASTSafetyError(
-            "INTERNAL ERROR: Black produced code that is not equivalent to the"
-            " source. Please report a bug on "
-            f"https://github.com/psf/black/issues. This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to"
+            " the source. Please report a bug on https://github.com/psf/black/issues."
+            f" This diff might be helpful: {log}"
         ) from None


 def assert_stable(
-    src: str, dst: str, mode: Mode, *, lines: Collection[Tuple[int, int]] = ()
+    src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
 ) -> None:
     """Raise AssertionError if `dst` reformats differently the second time."""
     if lines:
@@ -1576,9 +1566,9 @@ def assert_stable(
             diff(dst, newdst, "first pass", "second pass"),
         )
         raise AssertionError(
-            "INTERNAL ERROR: Black produced different code on the second pass of the"
-            " formatter. Please report a bug on https://github.com/psf/black/issues."
-            f" This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced different code on the second"
+            " pass of the formatter. Please report a bug on"
+            f" https://github.com/psf/black/issues. This diff might be helpful: {log}"
         ) from None
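The _black_info() helper introduced above is what the reworked internal-error messages interpolate; run standalone it produces a string along these lines (the version and interpreter shown are hypothetical):

import platform

__version__ = "25.1.0"  # hypothetical stand-in for black.__version__

def _black_info() -> str:
    return (
        f"Black {__version__} on "
        f"Python ({platform.python_implementation()}) {platform.python_version()}"
    )

print(_black_info())  # e.g. Black 25.1.0 on Python (CPython) 3.12.4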
@@ -1,9 +1,9 @@
 # Generated by make_width_table.py
 # wcwidth 0.2.6
 # Unicode 15.0.0
-from typing import Final, List, Tuple
+from typing import Final

-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
     (0, 0, 0),
     (1, 31, -1),
     (127, 159, -1),
@@ -1,7 +1,8 @@
 """Builds on top of nodes.py to track brackets."""

+from collections.abc import Iterable, Sequence
 from dataclasses import dataclass, field
-from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union
+from typing import Final, Optional, Union

 from black.nodes import (
     BRACKET,
@@ -60,12 +61,12 @@ class BracketTracker:
     """Keeps track of brackets on a line."""

     depth: int = 0
-    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
-    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
+    bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
+    delimiters: dict[LeafID, Priority] = field(default_factory=dict)
     previous: Optional[Leaf] = None
-    _for_loop_depths: List[int] = field(default_factory=list)
-    _lambda_argument_depths: List[int] = field(default_factory=list)
-    invisible: List[Leaf] = field(default_factory=list)
+    _for_loop_depths: list[int] = field(default_factory=list)
+    _lambda_argument_depths: list[int] = field(default_factory=list)
+    invisible: list[Leaf] = field(default_factory=list)

     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -353,7 +354,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
         return 0


-def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
+def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
     """Return leaves that are inside matching brackets.

     The input `leaves` can have non-matching brackets at the head or tail parts.
@@ -5,9 +5,10 @@
 import pickle
 import sys
 import tempfile
+from collections.abc import Iterable
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Iterable, NamedTuple, Set, Tuple
+from typing import NamedTuple

 from platformdirs import user_cache_dir

@@ -55,7 +56,7 @@ def get_cache_file(mode: Mode) -> Path:
 class Cache:
     mode: Mode
     cache_file: Path
-    file_data: Dict[str, FileData] = field(default_factory=dict)
+    file_data: dict[str, FileData] = field(default_factory=dict)

     @classmethod
     def read(cls, mode: Mode) -> Self:
@@ -76,7 +77,7 @@ def read(cls, mode: Mode) -> Self:

         with cache_file.open("rb") as fobj:
             try:
-                data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
+                data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
                 file_data = {k: FileData(*v) for k, v in data.items()}
             except (pickle.UnpicklingError, ValueError, IndexError):
                 return cls(mode, cache_file)
@@ -114,14 +115,14 @@ def is_changed(self, source: Path) -> bool:
                 return True
         return False

-    def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
        """Split an iterable of paths in `sources` into two sets.

         The first contains paths of files that modified on disk or are not in the
         cache. The other contains paths to non-modified files.
         """
-        changed: Set[Path] = set()
-        done: Set[Path] = set()
+        changed: set[Path] = set()
+        done: set[Path] = set()
         for src in sources:
             if self.is_changed(src):
                 changed.add(src)
@@ -139,9 +140,8 @@ def write(self, sources: Iterable[Path]) -> None:
             with tempfile.NamedTemporaryFile(
                 dir=str(self.cache_file.parent), delete=False
             ) as f:
-                # We store raw tuples in the cache because pickling NamedTuples
-                # doesn't work with mypyc on Python 3.8, and because it's faster.
-                data: Dict[str, Tuple[float, int, str]] = {
+                # We store raw tuples in the cache because it's faster.
+                data: dict[str, tuple[float, int, str]] = {
                     k: (*v,) for k, v in self.file_data.items()
                 }
                 pickle.dump(data, f, protocol=4)
@@ -1,7 +1,8 @@
 import re
+from collections.abc import Collection, Iterator
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import Collection, Final, Iterator, List, Optional, Tuple, Union
+from typing import Final, Optional, Union

 from black.mode import Mode, Preview
 from black.nodes import (
@@ -77,9 +78,9 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]:


 @lru_cache(maxsize=4096)
-def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
     """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
-    result: List[ProtoComment] = []
+    result: list[ProtoComment] = []
     if not prefix or "#" not in prefix:
         return result

@@ -166,7 +167,7 @@ def make_comment(content: str) -> str:


 def normalize_fmt_off(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> None:
     """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
     try_again = True
@@ -175,7 +176,7 @@ def normalize_fmt_off(


 def convert_one_fmt_off_pair(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> bool:
     """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.

@@ -184,24 +185,24 @@ def convert_one_fmt_off_pair(
     for leaf in node.leaves():
         previous_consumed = 0
         for comment in list_comments(leaf.prefix, is_endmarker=False):
-            should_pass_fmt = comment.value in FMT_OFF or _contains_fmt_skip_comment(
-                comment.value, mode
-            )
-            if not should_pass_fmt:
+            is_fmt_off = comment.value in FMT_OFF
+            is_fmt_skip = _contains_fmt_skip_comment(comment.value, mode)
+            if (not is_fmt_off and not is_fmt_skip) or (
+                # Invalid use when `# fmt: off` is applied before a closing bracket.
+                is_fmt_off
+                and leaf.type in CLOSING_BRACKETS
+            ):
                 previous_consumed = comment.consumed
                 continue
             # We only want standalone comments. If there's no previous leaf or
             # the previous leaf is indentation, it's a standalone comment in
             # disguise.
-            if should_pass_fmt and comment.type != STANDALONE_COMMENT:
+            if comment.type != STANDALONE_COMMENT:
                 prev = preceding_leaf(leaf)
                 if prev:
-                    if comment.value in FMT_OFF and prev.type not in WHITESPACE:
+                    if is_fmt_off and prev.type not in WHITESPACE:
                         continue
-                    if (
-                        _contains_fmt_skip_comment(comment.value, mode)
-                        and prev.type in WHITESPACE
-                    ):
+                    if is_fmt_skip and prev.type in WHITESPACE:
                         continue

             ignored_nodes = list(generate_ignored_nodes(leaf, comment, mode))
@@ -213,7 +214,7 @@ def convert_one_fmt_off_pair(
             prefix = first.prefix
             if comment.value in FMT_OFF:
                 first.prefix = prefix[comment.consumed :]
-            if _contains_fmt_skip_comment(comment.value, mode):
+            if is_fmt_skip:
                 first.prefix = ""
                 standalone_comment_prefix = prefix
             else:
@@ -233,12 +234,8 @@ def convert_one_fmt_off_pair(
                 fmt_off_prefix = fmt_off_prefix.split("\n")[-1]
                 standalone_comment_prefix += fmt_off_prefix
             hidden_value = comment.value + "\n" + hidden_value
-            if _contains_fmt_skip_comment(comment.value, mode):
-                hidden_value += (
-                    comment.leading_whitespace
-                    if Preview.no_normalize_fmt_skip_whitespace in mode
-                    else " "
-                ) + comment.value
+            if is_fmt_skip:
+                hidden_value += comment.leading_whitespace + comment.value
             if hidden_value.endswith("\n"):
                 # That happens when one of the `ignored_nodes` ended with a NEWLINE
                 # leaf (possibly followed by a DEDENT).
@@ -273,7 +270,7 @@ def generate_ignored_nodes(
     Stops at the end of the block.
     """
     if _contains_fmt_skip_comment(comment.value, mode):
-        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
+        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, mode)
         return
     container: Optional[LN] = container_of(leaf)
     while container is not None and container.type != token.ENDMARKER:
@@ -312,23 +309,67 @@ def generate_ignored_nodes(


 def _generate_ignored_nodes_from_fmt_skip(
-    leaf: Leaf, comment: ProtoComment
+    leaf: Leaf, comment: ProtoComment, mode: Mode
 ) -> Iterator[LN]:
     """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
     prev_sibling = leaf.prev_sibling
     parent = leaf.parent
+    ignored_nodes: list[LN] = []
     # Need to properly format the leaf prefix to compare it to comment.value,
     # which is also formatted
     comments = list_comments(leaf.prefix, is_endmarker=False)
     if not comments or comment.value != comments[0].value:
         return
     if prev_sibling is not None:
-        leaf.prefix = ""
-        siblings = [prev_sibling]
-        while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None:
-            prev_sibling = prev_sibling.prev_sibling
-            siblings.insert(0, prev_sibling)
-        yield from siblings
+        leaf.prefix = leaf.prefix[comment.consumed :]
+
+        if Preview.fix_fmt_skip_in_one_liners not in mode:
+            siblings = [prev_sibling]
+            while (
+                "\n" not in prev_sibling.prefix
+                and prev_sibling.prev_sibling is not None
+            ):
+                prev_sibling = prev_sibling.prev_sibling
+                siblings.insert(0, prev_sibling)
+            yield from siblings
+            return
+
+        # Generates the nodes to be ignored by `fmt: skip`.
+
+        # Nodes to ignore are the ones on the same line as the
+        # `# fmt: skip` comment, excluding the `# fmt: skip`
+        # node itself.
+
+        # Traversal process (starting at the `# fmt: skip` node):
+        # 1. Move to the `prev_sibling` of the current node.
+        # 2. If `prev_sibling` has children, go to its rightmost leaf.
+        # 3. If there’s no `prev_sibling`, move up to the parent
+        #    node and repeat.
+        # 4. Continue until:
+        #   a. You encounter an `INDENT` or `NEWLINE` node (indicates
+        #      start of the line).
+        #   b. You reach the root node.
+
+        # Include all visited LEAVES in the ignored list, except INDENT
+        # or NEWLINE leaves.
+
+        current_node = prev_sibling
+        ignored_nodes = [current_node]
+        if current_node.prev_sibling is None and current_node.parent is not None:
+            current_node = current_node.parent
+        while "\n" not in current_node.prefix and current_node.prev_sibling is not None:
+            leaf_nodes = list(current_node.prev_sibling.leaves())
+            current_node = leaf_nodes[-1] if leaf_nodes else current_node
+
+            if current_node.type in (token.NEWLINE, token.INDENT):
+                current_node.prefix = ""
+                break
+
+            ignored_nodes.insert(0, current_node)
+
+            if current_node.prev_sibling is None and current_node.parent is not None:
+                current_node = current_node.parent
+        yield from ignored_nodes
     elif (
         parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
     ):
@@ -336,7 +377,6 @@ def _generate_ignored_nodes_from_fmt_skip(
         # statements. The ignored nodes should be previous siblings of the
         # parent suite node.
         leaf.prefix = ""
-        ignored_nodes: List[LN] = []
         parent_sibling = parent.prev_sibling
         while parent_sibling is not None and parent_sibling.type != syms.suite:
             ignored_nodes.insert(0, parent_sibling)
@@ -376,7 +416,7 @@ def children_contains_fmt_on(container: LN) -> bool:
     return False


-def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
+def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
     """
     Returns:
         True iff one of the comments in @comment_list is a pragma used by one
@@ -10,10 +10,11 @@
 import signal
 import sys
 import traceback
+from collections.abc import Iterable
 from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
 from multiprocessing import Manager
 from pathlib import Path
-from typing import Any, Iterable, Optional, Set
+from typing import Any, Optional

 from mypy_extensions import mypyc_attr

@@ -69,7 +70,7 @@ def shutdown(loop: asyncio.AbstractEventLoop) -> None:
 # not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
 @mypyc_attr(patchable=True)
 def reformat_many(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
@@ -119,7 +120,7 @@ def reformat_many(


 async def schedule_formatting(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
@@ -1,5 +1,6 @@
+from collections.abc import Iterator
 from dataclasses import dataclass, field
-from typing import Any, Iterator, List, TypeVar, Union
+from typing import Any, TypeVar, Union

 from black.nodes import Visitor
 from black.output import out
@@ -14,7 +15,7 @@
 @dataclass
 class DebugVisitor(Visitor[T]):
     tree_depth: int = 0
-    list_output: List[str] = field(default_factory=list)
+    list_output: list[str] = field(default_factory=list)
     print_output: bool = True

     def out(self, message: str, *args: Any, **kwargs: Any) -> None:
@@ -1,21 +1,11 @@
 import io
 import os
 import sys
+from collections.abc import Iterable, Iterator, Sequence
 from functools import lru_cache
 from pathlib import Path
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Sequence,
-    Tuple,
-    Union,
-)
+from re import Pattern
+from typing import TYPE_CHECKING, Any, Optional, Union

 from mypy_extensions import mypyc_attr
 from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet
@@ -43,7 +33,7 @@


 @lru_cache
-def _load_toml(path: Union[Path, str]) -> Dict[str, Any]:
+def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
     with open(path, "rb") as f:
         return tomllib.load(f)

@@ -56,9 +46,12 @@ def _cached_resolve(path: Path) -> Path:
 @lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
-) -> Tuple[Path, str]:
+) -> tuple[Path, str]:
     """Return a directory containing .git, .hg, or pyproject.toml.

+    pyproject.toml files are only considered if they contain a [tool.black]
+    section and are ignored otherwise.
+
     That directory will be a common parent of all files and directories
     passed in `srcs`.

@@ -103,7 +96,7 @@ def find_project_root(


 def find_pyproject_toml(
-    path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
+    path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
 ) -> Optional[str]:
     """Find the absolute filepath to a pyproject.toml if it exists"""
     path_project_root, _ = find_project_root(path_search_start, stdin_filename)
@@ -125,13 +118,13 @@ def find_pyproject_toml(


 @mypyc_attr(patchable=True)
-def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
+def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
     """Parse a pyproject toml file, pulling out relevant parts for Black.

     If parsing fails, will raise a tomllib.TOMLDecodeError.
     """
     pyproject_toml = _load_toml(path_config)
-    config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
+    config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
     config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}

     if "target_version" not in config:
@@ -143,8 +136,8 @@ def parse_pyproject_toml(path_config: str) -> dict[str, Any]:


 def infer_target_version(
-    pyproject_toml: Dict[str, Any],
-) -> Optional[List[TargetVersion]]:
+    pyproject_toml: dict[str, Any],
+) -> Optional[list[TargetVersion]]:
     """Infer Black's target version from the project metadata in pyproject.toml.

     Supports the PyPA standard format (PEP 621):
@@ -167,7 +160,7 @@ def infer_target_version(
     return None


-def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.

     If parsing fails, will raise a packaging.version.InvalidVersion error.
@@ -182,7 +175,7 @@ def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
     return None


-def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.

     If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
@@ -193,7 +186,7 @@ def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
         return None

     target_version_map = {f"3.{v.value}": v for v in TargetVersion}
-    compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
+    compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
     if compatible_versions:
         return [target_version_map[v] for v in compatible_versions]
     return None
@@ -248,7 +241,7 @@ def find_user_pyproject_toml() -> Path:
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
-    lines: List[str] = []
+    lines: list[str] = []
     if gitignore.is_file():
         with gitignore.open(encoding="utf-8") as gf:
             lines = gf.readlines()
@@ -269,8 +262,6 @@ def resolves_outside_root_or_cannot_stat(
     root directory. Also returns True if we failed to resolve the path.
     """
     try:
-        if sys.version_info < (3, 8, 6):
-            path = path.absolute()  # https://bugs.python.org/issue33660
         resolved_path = _cached_resolve(path)
     except OSError as e:
         if report:
@@ -301,7 +292,7 @@ def best_effort_relative_path(path: Path, root: Path) -> Path:
 def _path_is_ignored(
     root_relative_path: str,
     root: Path,
-    gitignore_dict: Dict[Path, PathSpec],
+    gitignore_dict: dict[Path, PathSpec],
 ) -> bool:
     path = root / root_relative_path
     # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
@@ -309,6 +300,8 @@ def _path_is_ignored(
     for gitignore_path, pattern in gitignore_dict.items():
         try:
             relative_path = path.relative_to(gitignore_path).as_posix()
+            if path.is_dir():
+                relative_path = relative_path + "/"
         except ValueError:
             break
         if pattern.match_file(relative_path):
@@ -332,7 +325,7 @@ def gen_python_files(
     extend_exclude: Optional[Pattern[str]],
     force_exclude: Optional[Pattern[str]],
     report: Report,
-    gitignore_dict: Optional[Dict[Path, PathSpec]],
+    gitignore_dict: Optional[dict[Path, PathSpec]],
     *,
     verbose: bool,
     quiet: bool,
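The parse_req_python_specifier() change above keeps the same filtering idea while modernizing the annotations; a rough standalone illustration of that filtering step, using the packaging library directly (the candidate list here is a simplified stand-in for Black's TargetVersion mapping):

from packaging.specifiers import SpecifierSet

specifier_set = SpecifierSet(">=3.9,<3.11")
# Simplified stand-in for {f"3.{v.value}": v for v in TargetVersion}
candidates = [f"3.{minor}" for minor in range(3, 14)]
compatible = sorted(specifier_set.filter(candidates))
print(compatible)  # ['3.10', '3.9']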
@ -3,17 +3,19 @@
|
||||
import ast
|
||||
import collections
|
||||
import dataclasses
|
||||
import re
|
||||
import secrets
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from importlib.util import find_spec
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
from typing import Optional
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from typing import TypeGuard
|
||||
else:
|
||||
from typing_extensions import TypeGuard
|
||||
|
||||
from black.mode import Mode
|
||||
from black.output import out
|
||||
from black.report import NothingChanged
|
||||
|
||||
@ -41,7 +43,6 @@
|
||||
"time",
|
||||
"timeit",
|
||||
))
|
||||
TOKEN_HEX = secrets.token_hex
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
@ -64,7 +65,35 @@ def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
|
||||
return installed
|
||||
|
||||
|
||||
def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
|
||||
def validate_cell(src: str, mode: Mode) -> None:
|
||||
"""Check that cell does not already contain TransformerManager transformations,
|
||||
or non-Python cell magics, which might cause tokenizer_rt to break because of
|
||||
indentations.
|
||||
|
||||
If a cell contains ``!ls``, then it'll be transformed to
|
||||
``get_ipython().system('ls')``. However, if the cell originally contained
|
||||
``get_ipython().system('ls')``, then it would get transformed in the same way:
|
||||
|
||||
>>> TransformerManager().transform_cell("get_ipython().system('ls')")
|
||||
"get_ipython().system('ls')\n"
|
||||
>>> TransformerManager().transform_cell("!ls")
|
||||
"get_ipython().system('ls')\n"
|
||||
|
||||
Due to the impossibility of safely roundtripping in such situations, cells
|
||||
containing transformed magics will be ignored.
|
||||
"""
|
||||
if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
|
||||
raise NothingChanged
|
||||
|
||||
line = _get_code_start(src)
|
||||
if line.startswith("%%") and (
|
||||
line.split(maxsplit=1)[0][2:]
|
||||
not in PYTHON_CELL_MAGICS | mode.python_cell_magics
|
||||
):
|
||||
raise NothingChanged
|
||||
|
||||
|
||||
def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
|
||||
"""Remove trailing semicolon from Jupyter notebook cell.
|
||||
|
||||
For example,
|
||||
@ -120,7 +149,7 @@ def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
|
||||
return str(tokens_to_src(tokens))
|
||||
|
||||
|
||||
def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
|
||||
def mask_cell(src: str) -> tuple[str, list[Replacement]]:
|
||||
"""Mask IPython magics so content becomes parseable Python code.
|
||||
|
||||
For example,
|
||||
@ -130,12 +159,12 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
|
||||
|
||||
becomes
|
||||
|
||||
"25716f358c32750e"
|
||||
b"25716f358c32750"
|
||||
'foo'
|
||||
|
||||
The replacements are returned, along with the transformed code.
|
||||
"""
|
||||
replacements: List[Replacement] = []
|
||||
replacements: list[Replacement] = []
|
||||
try:
|
||||
ast.parse(src)
|
||||
except SyntaxError:
|
||||
@ -148,18 +177,32 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
|
||||
from IPython.core.inputtransformer2 import TransformerManager
|
||||
|
||||
transformer_manager = TransformerManager()
|
||||
# A side effect of the following transformation is that it also removes any
|
||||
# empty lines at the beginning of the cell.
|
||||
transformed = transformer_manager.transform_cell(src)
|
||||
transformed, cell_magic_replacements = replace_cell_magics(transformed)
|
||||
replacements += cell_magic_replacements
|
||||
transformed = transformer_manager.transform_cell(transformed)
|
||||
transformed, magic_replacements = replace_magics(transformed)
|
||||
if len(transformed.splitlines()) != len(src.splitlines()):
|
||||
if len(transformed.strip().splitlines()) != len(src.strip().splitlines()):
|
||||
# Multi-line magic, not supported.
|
||||
raise NothingChanged
|
||||
replacements += magic_replacements
|
||||
return transformed, replacements
|
||||
|
||||
|
||||
def create_token(n_chars: int) -> str:
|
||||
"""Create a randomly generated token that is n_chars characters long."""
|
||||
assert n_chars > 0
|
||||
n_bytes = max(n_chars // 2 - 1, 1)
|
||||
token = secrets.token_hex(n_bytes)
|
||||
if len(token) + 3 > n_chars:
|
||||
token = token[:-1]
|
||||
# We use a bytestring so that the string does not get interpreted
|
||||
# as a docstring.
|
||||
return f'b"{token}"'
|
||||
|
||||
|
||||
def get_token(src: str, magic: str) -> str:
|
||||
"""Return randomly generated token to mask IPython magic with.
|
||||
|
||||
@ -169,11 +212,11 @@ def get_token(src: str, magic: str) -> str:
|
||||
not already present anywhere else in the cell.
|
||||
"""
|
||||
assert magic
|
||||
nbytes = max(len(magic) // 2 - 1, 1)
|
||||
token = TOKEN_HEX(nbytes)
|
||||
n_chars = len(magic)
|
||||
token = create_token(n_chars)
|
||||
counter = 0
|
||||
while token in src:
|
||||
token = TOKEN_HEX(nbytes)
|
||||
token = create_token(n_chars)
|
||||
counter += 1
|
||||
if counter > 100:
|
||||
raise AssertionError(
|
||||
@ -181,12 +224,10 @@ def get_token(src: str, magic: str) -> str:
|
||||
"Please report a bug on https://github.com/psf/black/issues. "
|
||||
f"The magic might be helpful: {magic}"
|
||||
) from None
|
||||
if len(token) + 2 < len(magic):
|
||||
token = f"{token}."
|
||||
return f'"{token}"'
|
||||
return token
|
||||
|
||||
|
||||
def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
    """Replace cell magic with token.

    Note that 'src' will already have been processed by IPython's
@@ -203,7 +244,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:

    The replacement, along with the transformed code, is returned.
    """
    replacements: List[Replacement] = []
    replacements: list[Replacement] = []

    tree = ast.parse(src)

@@ -217,7 +258,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
    return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements


def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
def replace_magics(src: str) -> tuple[str, list[Replacement]]:
    """Replace magics within body of cell.

    Note that 'src' will already have been processed by IPython's
@@ -239,7 +280,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
    magic_finder = MagicFinder()
    magic_finder.visit(ast.parse(src))
    new_srcs = []
    for i, line in enumerate(src.splitlines(), start=1):
    for i, line in enumerate(src.split("\n"), start=1):
        if i in magic_finder.magics:
            offsets_and_magics = magic_finder.magics[i]
            if len(offsets_and_magics) != 1:  # pragma: nocover
@@ -258,7 +299,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
    return "\n".join(new_srcs), replacements


def unmask_cell(src: str, replacements: List[Replacement]) -> str:
def unmask_cell(src: str, replacements: list[Replacement]) -> str:
    """Remove replacements from cell.

    For example
@@ -276,6 +317,21 @@ def unmask_cell(src: str, replacements: List[Replacement]) -> str:
    return src


def _get_code_start(src: str) -> str:
    """Provides the first line where the code starts.

    Iterates over lines of code until it finds the first line that doesn't
    contain only empty spaces and comments. It removes any empty spaces at the
    start of the line and returns it. If such line doesn't exist, it returns an
    empty string.
    """
    for match in re.finditer(".+", src):
        line = match.group(0).lstrip()
        if line and not line.startswith("#"):
            return line
    return ""

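A quick usage sketch for _get_code_start (the sample cell is made up): comment-only and blank lines are skipped, and the first real line comes back left-stripped.

    src = "\n# config cell\n  %matplotlib inline\nx = 1\n"
    assert _get_code_start(src) == "%matplotlib inline"
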
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
    """Check if attribute is IPython magic.

@@ -291,11 +347,11 @@ def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
    )


def _get_str_args(args: List[ast.expr]) -> List[str]:
def _get_str_args(args: list[ast.expr]) -> list[str]:
    str_args = []
    for arg in args:
        assert isinstance(arg, ast.Str)
        str_args.append(arg.s)
        assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
        str_args.append(arg.value)
    return str_args

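The assert above changed because ast.Str and its .s attribute were deprecated in Python 3.8 and removed in 3.12; string literals now parse as ast.Constant. A minimal illustration:

    import ast

    arg = ast.parse("'ls -la'", mode="eval").body
    assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
    assert arg.value == "ls -la"
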
@@ -375,7 +431,7 @@ class MagicFinder(ast.NodeVisitor):
    """

    def __init__(self) -> None:
        self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
        self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)

    def visit_Assign(self, node: ast.Assign) -> None:
        """Look for system assign magics.

@@ -4,10 +4,11 @@

import re
import sys
from collections.abc import Collection, Iterator
from dataclasses import replace
from enum import Enum, auto
from functools import partial, wraps
from typing import Collection, Iterator, List, Optional, Set, Union, cast
from typing import Optional, Union, cast

from black.brackets import (
    COMMA_PRIORITY,
@@ -39,11 +40,13 @@
    ensure_visible,
    fstring_to_string,
    get_annotation_type,
    has_sibling_with_type,
    is_arith_like,
    is_async_stmt_or_funcdef,
    is_atom_with_invisible_parens,
    is_docstring,
    is_empty_tuple,
    is_generator,
    is_lpar_token,
    is_multiline_string,
    is_name_token,
@@ -54,6 +57,8 @@
    is_rpar_token,
    is_stub_body,
    is_stub_suite,
    is_tuple,
    is_tuple_containing_star,
    is_tuple_containing_walrus,
    is_type_ignore_comment_string,
    is_vararg,
@@ -64,7 +69,7 @@
)
from black.numerics import normalize_numeric_literal
from black.strings import (
    fix_docstring,
    fix_multiline_docstring,
    get_string_prefix,
    normalize_string_prefix,
    normalize_string_quotes,
@@ -197,7 +202,7 @@ def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
        yield from self.line(-1)

    def visit_stmt(
        self, node: Node, keywords: Set[str], parens: Set[str]
        self, node: Node, keywords: set[str], parens: set[str]
    ) -> Iterator[Line]:
        """Visit a statement.

@@ -411,10 +416,9 @@ def foo(a: (int), b: (float) = 7): ...
        yield from self.visit_default(node)

    def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
        if Preview.hex_codes_in_unicode_sequences in self.mode:
            normalize_unicode_escape_sequences(leaf)
        normalize_unicode_escape_sequences(leaf)

        if is_docstring(leaf, self.mode) and not re.search(r"\\\s*\n", leaf.value):
        if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value):
            # We're ignoring docstrings with backslash newline escapes because changing
            # indentation of those changes the AST representation of the code.
            if self.mode.string_normalization:
@@ -441,7 +445,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
            indent = " " * 4 * self.current_line.depth

            if is_multiline_string(leaf):
                docstring = fix_docstring(docstring, indent)
                docstring = fix_multiline_docstring(docstring, indent)
            else:
                docstring = docstring.strip()

@@ -485,10 +489,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
                and len(indent) + quote_len <= self.mode.line_length
                and not has_trailing_backslash
            ):
                if (
                    Preview.docstring_check_for_newline in self.mode
                    and leaf.value[-1 - quote_len] == "\n"
                ):
                if leaf.value[-1 - quote_len] == "\n":
                    leaf.value = prefix + quote + docstring + quote
                else:
                    leaf.value = prefix + quote + docstring + "\n" + indent + quote
@@ -506,10 +507,32 @@ def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]:
        normalize_numeric_literal(leaf)
        yield from self.visit_default(leaf)

    def visit_atom(self, node: Node) -> Iterator[Line]:
        """Visit any atom"""
        if len(node.children) == 3:
            first = node.children[0]
            last = node.children[-1]
            if (first.type == token.LSQB and last.type == token.RSQB) or (
                first.type == token.LBRACE and last.type == token.RBRACE
            ):
                # Lists or sets of one item
                maybe_make_parens_invisible_in_atom(node.children[1], parent=node)

        yield from self.visit_default(node)

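A rough before/after sketch of what the new visit_atom branch does for one-item lists and sets (hypothetical input, not taken from the test suite):

    result = [("some value")]    # before
    result = ["some value"]      # after

maybe_make_parens_invisible_in_atom declines when the parentheses are semantically required, for example around a generator expression or a tuple, so those are left alone.
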
    def visit_fstring(self, node: Node) -> Iterator[Line]:
        # currently we don't want to format and split f-strings at all.
        string_leaf = fstring_to_string(node)
        node.replace(string_leaf)
        if "\\" in string_leaf.value and any(
            "\\" in str(child)
            for child in node.children
            if child.type == syms.fstring_replacement_field
        ):
            # string normalization doesn't account for nested quotes,
            # causing breakages. skip normalization when nested quotes exist
            yield from self.visit_default(string_leaf)
            return
        yield from self.visit_STRING(string_leaf)

        # TODO: Uncomment Implementation to format f-string children
@@ -550,7 +573,7 @@ def __post_init__(self) -> None:
        self.current_line = Line(mode=self.mode)

        v = self.visit_stmt
        Ø: Set[str] = set()
        Ø: set[str] = set()
        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
        self.visit_if_stmt = partial(
            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
@@ -574,8 +597,7 @@ def __post_init__(self) -> None:
        # PEP 634
        self.visit_match_stmt = self.visit_match_case
        self.visit_case_block = self.visit_match_case
        if Preview.remove_redundant_guard_parens in self.mode:
            self.visit_guard = partial(v, keywords=Ø, parens={"if"})
        self.visit_guard = partial(v, keywords=Ø, parens={"if"})


def _hugging_power_ops_line_to_string(
@@ -617,7 +639,7 @@ def transform_line(
        string_split = StringSplitter(ll, sn)
        string_paren_wrap = StringParenWrapper(ll, sn)

        transformers: List[Transformer]
        transformers: list[Transformer]
        if (
            not line.contains_uncollapsable_type_comments()
            and not line.should_split_rhs
@@ -717,7 +739,7 @@ def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
    """If a funcdef has a magic trailing comma in the return type, then we should first
    split the line with rhs to respect the comma.
    """
    return_type_leaves: List[Leaf] = []
    return_type_leaves: list[Leaf] = []
    in_return_type = False

    for leaf in line.leaves:
@@ -759,26 +781,29 @@ def left_hand_split(
    Prefer RHS otherwise. This is why this function is not symmetrical with
    :func:`right_hand_split` which also handles optional parentheses.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = head_leaves
    matching_bracket: Optional[Leaf] = None
    for leaf in line.leaves:
        if (
            current_leaves is body_leaves
            and leaf.type in CLOSING_BRACKETS
            and leaf.opening_bracket is matching_bracket
            and isinstance(matching_bracket, Leaf)
        ):
            ensure_visible(leaf)
            ensure_visible(matching_bracket)
            current_leaves = tail_leaves if body_leaves else head_leaves
        current_leaves.append(leaf)
        if current_leaves is head_leaves:
            if leaf.type in OPENING_BRACKETS:
                matching_bracket = leaf
                current_leaves = body_leaves
    for leaf_type in [token.LPAR, token.LSQB]:
        tail_leaves: list[Leaf] = []
        body_leaves: list[Leaf] = []
        head_leaves: list[Leaf] = []
        current_leaves = head_leaves
        matching_bracket: Optional[Leaf] = None
        for leaf in line.leaves:
            if (
                current_leaves is body_leaves
                and leaf.type in CLOSING_BRACKETS
                and leaf.opening_bracket is matching_bracket
                and isinstance(matching_bracket, Leaf)
            ):
                ensure_visible(leaf)
                ensure_visible(matching_bracket)
                current_leaves = tail_leaves if body_leaves else head_leaves
            current_leaves.append(leaf)
            if current_leaves is head_leaves:
                if leaf.type == leaf_type:
                    matching_bracket = leaf
                    current_leaves = body_leaves
        if matching_bracket and tail_leaves:
            break
    if not matching_bracket or not tail_leaves:
        raise CannotSplit("No brackets found")

@@ -827,9 +852,9 @@ def _first_right_hand_split(
    _maybe_split_omitting_optional_parens to get an opinion whether to prefer
    splitting on the right side of an assignment statement.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    tail_leaves: list[Leaf] = []
    body_leaves: list[Leaf] = []
    head_leaves: list[Leaf] = []
    current_leaves = tail_leaves
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
@@ -860,8 +885,8 @@ def _first_right_hand_split(
        and tail_leaves[0].opening_bracket is head_leaves[-1]
    ):
        inner_body_leaves = list(body_leaves)
        hugged_opening_leaves: List[Leaf] = []
        hugged_closing_leaves: List[Leaf] = []
        hugged_opening_leaves: list[Leaf] = []
        hugged_closing_leaves: list[Leaf] = []
        is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
        unpacking_offset: int = 1 if is_unpacking else 0
        while (
@@ -945,29 +970,7 @@ def _maybe_split_omitting_optional_parens(
        try:
            # The RHSResult Omitting Optional Parens.
            rhs_oop = _first_right_hand_split(line, omit=omit)
            is_split_right_after_equal = (
                len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL
            )
            rhs_head_contains_brackets = any(
                leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]
            )
            # the -1 is for the ending optional paren
            rhs_head_short_enough = is_line_short_enough(
                rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
            )
            rhs_head_explode_blocked_by_magic_trailing_comma = (
                rhs.head.magic_trailing_comma is None
            )
            if (
                not (
                    is_split_right_after_equal
                    and rhs_head_contains_brackets
                    and rhs_head_short_enough
                    and rhs_head_explode_blocked_by_magic_trailing_comma
                )
                # the omit optional parens split is preferred by some other reason
                or _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode)
            ):
            if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode):
                yield from _maybe_split_omitting_optional_parens(
                    rhs_oop, line, mode, features=features, omit=omit
                )
@@ -978,8 +981,15 @@ def _maybe_split_omitting_optional_parens(
            if line.is_chained_assignment:
                pass

            elif not can_be_split(rhs.body) and not is_line_short_enough(
                rhs.body, mode=mode
            elif (
                not can_be_split(rhs.body)
                and not is_line_short_enough(rhs.body, mode=mode)
                and not (
                    Preview.wrap_long_dict_values_in_parens
                    and rhs.opening_bracket.parent
                    and rhs.opening_bracket.parent.parent
                    and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
                )
            ):
                raise CannotSplit(
                    "Splitting failed, body is still too long and can't be split."
@@ -1010,6 +1020,44 @@ def _prefer_split_rhs_oop_over_rhs(
    Returns whether we should prefer the result from a split omitting optional parens
    (rhs_oop) over the original (rhs).
    """
    # contains unsplittable type ignore
    if (
        rhs_oop.head.contains_unsplittable_type_ignore()
        or rhs_oop.body.contains_unsplittable_type_ignore()
        or rhs_oop.tail.contains_unsplittable_type_ignore()
    ):
        return True

    # Retain optional parens around dictionary values
    if (
        Preview.wrap_long_dict_values_in_parens
        and rhs.opening_bracket.parent
        and rhs.opening_bracket.parent.parent
        and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
        and rhs.body.bracket_tracker.delimiters
    ):
        # Unless the split is inside the key
        return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves)

    # the split is right after `=`
    if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL):
        return True

    # the left side of assignment contains brackets
    if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]):
        return True

    # the left side of assignment is short enough (the -1 is for the ending optional
    # paren)
    if not is_line_short_enough(
        rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
    ):
        return True

    # the left side of assignment won't explode further because of magic trailing comma
    if rhs.head.magic_trailing_comma is not None:
        return True

    # If we have multiple targets, we prefer more `=`s on the head vs pushing them to
    # the body
    rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL)
@@ -1037,10 +1085,6 @@ def _prefer_split_rhs_oop_over_rhs(
        # the first line is short enough
        and is_line_short_enough(rhs_oop.head, mode=mode)
    )
    # contains unsplittable type ignore
    or rhs_oop.head.contains_unsplittable_type_ignore()
    or rhs_oop.body.contains_unsplittable_type_ignore()
    or rhs_oop.tail.contains_unsplittable_type_ignore()
)


@@ -1070,8 +1114,44 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None
    )


def _ensure_trailing_comma(
    leaves: list[Leaf], original: Line, opening_bracket: Leaf
) -> bool:
    if not leaves:
        return False
    # Ensure a trailing comma for imports
    if original.is_import:
        return True
    # ...and standalone function arguments
    if not original.is_def:
        return False
    if opening_bracket.value != "(":
        return False
    # Don't add commas if we already have any commas
    if any(
        leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves
    ):
        return False

    # Find a leaf with a parent (comments don't have parents)
    leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None)
    if leaf_with_parent is None:
        return True
    # Don't add commas inside parenthesized return annotations
    if get_annotation_type(leaf_with_parent) == "return":
        return False
    # Don't add commas inside PEP 604 unions
    if (
        leaf_with_parent.parent
        and leaf_with_parent.parent.next_sibling
        and leaf_with_parent.parent.next_sibling.type == token.VBAR
    ):
        return False
    return True

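The unwieldy no_commas boolean shown below was folded into the _ensure_trailing_comma helper above, trading one multi-clause expression for early returns. The user-visible behaviour it implements can be spot-checked end to end (a sketch, assuming black is installed):

    import black

    src = "from m import (aaaaaaaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb)\n"
    out = black.format_str(src, mode=black.Mode(line_length=40))
    assert out.splitlines()[-2].endswith(",")  # exploded import gains a trailing comma
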
def bracket_split_build_line(
    leaves: List[Leaf],
    leaves: list[Leaf],
    original: Line,
    opening_bracket: Leaf,
    *,
@@ -1090,42 +1170,17 @@ def bracket_split_build_line(
    if component is _BracketSplitComponent.body:
        result.inside_brackets = True
        result.depth += 1
        if leaves:
            no_commas = (
                # Ensure a trailing comma for imports and standalone function arguments
                original.is_def
                # Don't add one after any comments or within type annotations
                and opening_bracket.value == "("
                # Don't add one if there's already one there
                and not any(
                    leaf.type == token.COMMA
                    and (
                        Preview.typed_params_trailing_comma not in original.mode
                        or not is_part_of_annotation(leaf)
                    )
                    for leaf in leaves
                )
                # Don't add one inside parenthesized return annotations
                and get_annotation_type(leaves[0]) != "return"
                # Don't add one inside PEP 604 unions
                and not (
                    leaves[0].parent
                    and leaves[0].parent.next_sibling
                    and leaves[0].parent.next_sibling.type == token.VBAR
                )
            )
        if _ensure_trailing_comma(leaves, original, opening_bracket):
            for i in range(len(leaves) - 1, -1, -1):
                if leaves[i].type == STANDALONE_COMMENT:
                    continue

            if original.is_import or no_commas:
                for i in range(len(leaves) - 1, -1, -1):
                    if leaves[i].type == STANDALONE_COMMENT:
                        continue
                    if leaves[i].type != token.COMMA:
                        new_comma = Leaf(token.COMMA, ",")
                        leaves.insert(i + 1, new_comma)
                    break

                if leaves[i].type != token.COMMA:
                    new_comma = Leaf(token.COMMA, ",")
                    leaves.insert(i + 1, new_comma)
                break

    leaves_to_track: Set[LeafID] = set()
    leaves_to_track: set[LeafID] = set()
    if component is _BracketSplitComponent.head:
        leaves_to_track = get_leaves_inside_matching_brackets(leaves)
    # Populate the line
@@ -1317,7 +1372,7 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]:


def normalize_invisible_parens(  # noqa: C901
    node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature]
    node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
) -> None:
    """Make existing optional parentheses invisible or create new ones.

@@ -1355,11 +1410,7 @@ def normalize_invisible_parens(  # noqa: C901
            )

        # Add parentheses around if guards in case blocks
        if (
            isinstance(child, Node)
            and child.type == syms.guard
            and Preview.parens_for_long_if_clauses_in_case_block in mode
        ):
        if isinstance(child, Node) and child.type == syms.guard:
            normalize_invisible_parens(
                child, parens_after={"if"}, mode=mode, features=features
            )
@@ -1577,6 +1628,12 @@ def maybe_make_parens_invisible_in_atom(
        node.type not in (syms.atom, syms.expr)
        or is_empty_tuple(node)
        or is_one_tuple(node)
        or (is_tuple(node) and parent.type == syms.asexpr_test)
        or (
            is_tuple(node)
            and parent.type == syms.with_stmt
            and has_sibling_with_type(node, token.COMMA)
        )
        or (is_yield(node) and parent.type != syms.expr_stmt)
        or (
            # This condition tries to prevent removing non-optional brackets
@@ -1586,6 +1643,8 @@ def maybe_make_parens_invisible_in_atom(
            and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
        )
        or is_tuple_containing_walrus(node)
        or is_tuple_containing_star(node)
        or is_generator(node)
    ):
        return False

@@ -1598,6 +1657,7 @@ def maybe_make_parens_invisible_in_atom(
        syms.except_clause,
        syms.funcdef,
        syms.with_stmt,
        syms.testlist_gexp,
        syms.tname,
        # these ones aren't useful to end users, but they do please fuzzers
        syms.for_stmt,
@@ -1617,9 +1677,6 @@ def maybe_make_parens_invisible_in_atom(
        not is_type_ignore_comment_string(middle.prefix.strip())
    ):
        first.value = ""
        if first.prefix.strip():
            # Preserve comments before first paren
            middle.prefix = first.prefix + middle.prefix
        last.value = ""
        maybe_make_parens_invisible_in_atom(
            middle,
@@ -1631,6 +1688,13 @@ def maybe_make_parens_invisible_in_atom(
            # Strip the invisible parens from `middle` by replacing
            # it with the child in-between the invisible parens
            middle.replace(middle.children[1])

            if middle.children[0].prefix.strip():
                # Preserve comments before first paren
                middle.children[1].prefix = (
                    middle.children[0].prefix + middle.children[1].prefix
                )

            if middle.children[-1].prefix.strip():
                # Preserve comments before last paren
                last.prefix = middle.children[-1].prefix + last.prefix
@@ -1667,7 +1731,7 @@ def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
    )


def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
    """Generate sets of closing bracket IDs that should be omitted in a RHS.

    Brackets can be omitted if the entire trailer up to and including
@@ -1678,14 +1742,14 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf
    the one that needs to explode are omitted.
    """

    omit: Set[LeafID] = set()
    omit: set[LeafID] = set()
    if not line.magic_trailing_comma:
        yield omit

    length = 4 * line.depth
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
    inner_brackets: Set[LeafID] = set()
    inner_brackets: set[LeafID] = set()
    for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
        length += leaf_length
        if length > line_length:
@@ -1750,10 +1814,10 @@ def run_transformer(
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> List[Line]:
) -> list[Line]:
    if not line_str:
        line_str = line_to_string(line)
    result: List[Line] = []
    result: list[Line] = []
    for transformed_line in transform(line, features, mode):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")

@@ -1,18 +1,8 @@
import itertools
import math
from collections.abc import Callable, Iterator, Sequence
from dataclasses import dataclass, field
from typing import (
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
    cast,
)
from typing import Optional, TypeVar, Union, cast

from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
from black.mode import Mode, Preview
@@ -52,9 +42,9 @@ class Line:

    mode: Mode = field(repr=False)
    depth: int = 0
    leaves: List[Leaf] = field(default_factory=list)
    leaves: list[Leaf] = field(default_factory=list)
    # keys ordered like `leaves`
    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
    comments: dict[LeafID, list[Leaf]] = field(default_factory=dict)
    bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
    inside_brackets: bool = False
    should_split_rhs: bool = False
@@ -214,9 +204,7 @@ def _is_triple_quoted_string(self) -> bool:
    @property
    def is_docstring(self) -> bool:
        """Is the line a docstring?"""
        if Preview.unify_docstring_detection not in self.mode:
            return self._is_triple_quoted_string
        return bool(self) and is_docstring(self.leaves[0], self.mode)
        return bool(self) and is_docstring(self.leaves[0])

    @property
    def is_chained_assignment(self) -> bool:
@@ -426,7 +414,7 @@ def append_comment(self, comment: Leaf) -> bool:
        self.comments.setdefault(id(last_leaf), []).append(comment)
        return True

    def comments_after(self, leaf: Leaf) -> List[Leaf]:
    def comments_after(self, leaf: Leaf) -> list[Leaf]:
        """Generate comments that should appear directly after `leaf`."""
        return self.comments.get(id(leaf), [])

@@ -459,13 +447,13 @@ def is_complex_subscript(self, leaf: Leaf) -> bool:

    def enumerate_with_length(
        self, is_reversed: bool = False
    ) -> Iterator[Tuple[Index, Leaf, int]]:
    ) -> Iterator[tuple[Index, Leaf, int]]:
        """Return an enumeration of leaves with their length.

        Stops prematurely on multiline strings and standalone comments.
        """
        op = cast(
            Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
            Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
            enumerate_reversed if is_reversed else enumerate,
        )
        for index, leaf in op(self.leaves):
@@ -531,11 +519,11 @@ class LinesBlock:
    previous_block: Optional["LinesBlock"]
    original_line: Line
    before: int = 0
    content_lines: List[str] = field(default_factory=list)
    content_lines: list[str] = field(default_factory=list)
    after: int = 0
    form_feed: bool = False

    def all_lines(self) -> List[str]:
    def all_lines(self) -> list[str]:
        empty_line = str(Line(mode=self.mode))
        prefix = make_simple_prefix(self.before, self.form_feed, empty_line)
        return [prefix] + self.content_lines + [empty_line * self.after]
@@ -554,7 +542,7 @@ class EmptyLineTracker:
    mode: Mode
    previous_line: Optional[Line] = None
    previous_block: Optional[LinesBlock] = None
    previous_defs: List[Line] = field(default_factory=list)
    previous_defs: list[Line] = field(default_factory=list)
    semantic_leading_comment: Optional[LinesBlock] = None

    def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
@@ -607,7 +595,7 @@ def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
        self.previous_block = block
        return block

    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C901
    def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]:  # noqa: C901
        max_allowed = 1
        if current_line.depth == 0:
            max_allowed = 1 if self.mode.is_pyi else 2
@@ -681,6 +669,15 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C9
                current_line, before, user_had_newline
            )

        if (
            self.previous_line.is_import
            and self.previous_line.depth == 0
            and current_line.depth == 0
            and not current_line.is_import
            and Preview.always_one_newline_after_import in self.mode
        ):
            return 1, 0

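A sketch of what the new Preview.always_one_newline_after_import branch enforces (hypothetical input, preview style only): given

    import os
    x = 1

the tracker now returns one blank line before the first non-import statement, producing

    import os

    x = 1
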
        if (
            self.previous_line.is_import
            and not current_line.is_import
@@ -693,7 +690,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C9

    def _maybe_empty_lines_for_class_or_def(  # noqa: C901
        self, current_line: Line, before: int, user_had_newline: bool
    ) -> Tuple[int, int]:
    ) -> tuple[int, int]:
        assert self.previous_line is not None

        if self.previous_line.is_decorator:
@@ -772,7 +769,7 @@ def _maybe_empty_lines_for_class_or_def(  # noqa: C901
        return newlines, 0


def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
    """Like `reversed(enumerate(sequence))` if that were possible."""
    index = len(sequence) - 1
    for element in reversed(sequence):
@@ -781,7 +778,7 @@ def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:


def append_leaves(
    new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
    new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
) -> None:
    """
    Append leaves (taken from @old_line) to @new_line, making sure to fix the
@@ -838,10 +835,10 @@ def is_line_short_enough(  # noqa: C901
    # Depth (which is based on the existing bracket_depth concept)
    # is needed to determine nesting level of the MLS.
    # Includes special case for trailing commas.
    commas: List[int] = []  # tracks number of commas per depth level
    commas: list[int] = []  # tracks number of commas per depth level
    multiline_string: Optional[Leaf] = None
    # store the leaves that contain parts of the MLS
    multiline_string_contexts: List[LN] = []
    multiline_string_contexts: list[LN] = []

    max_level_to_update: Union[int, float] = math.inf  # track the depth of the MLS
    for i, leaf in enumerate(line.leaves):
@@ -865,7 +862,7 @@ def is_line_short_enough(  # noqa: C901
        if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
            # Inside brackets, ignore trailing comma
            # directly after MLS/MLS-containing expression
            ignore_ctxs: List[Optional[LN]] = [None]
            ignore_ctxs: list[Optional[LN]] = [None]
            ignore_ctxs += multiline_string_contexts
            if (line.inside_brackets or leaf.bracket_depth > 0) and (
                i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs

@@ -8,7 +8,7 @@
from enum import Enum, auto
from hashlib import sha256
from operator import attrgetter
from typing import Dict, Final, Set
from typing import Final

from black.const import DEFAULT_LINE_LENGTH

@@ -26,6 +26,10 @@ class TargetVersion(Enum):
    PY312 = 12
    PY313 = 13

    def pretty(self) -> str:
        assert self.name[:2] == "PY"
        return f"Python {self.name[2]}.{self.name[3:]}"

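What the new pretty() helper produces; it is used further below to name the failing target version in parser error messages:

    assert TargetVersion.PY38.pretty() == "Python 3.8"
    assert TargetVersion.PY313.pretty() == "Python 3.13"
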
class Feature(Enum):
    F_STRINGS = 2
@@ -60,7 +64,7 @@ class Feature(Enum):
}


VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
    TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
@@ -185,34 +189,26 @@ class Feature(Enum):
}


def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)

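supports_feature requires every targeted version to support the feature, so adding one old target disables it. A small sketch:

    assert supports_feature({TargetVersion.PY312}, Feature.F_STRINGS)
    assert not supports_feature(
        {TargetVersion.PY35, TargetVersion.PY312}, Feature.F_STRINGS
    )
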
class Preview(Enum):
    """Individual preview style features."""

    hex_codes_in_unicode_sequences = auto()
    # NOTE: string_processing requires wrap_long_dict_values_in_parens
    # for https://github.com/psf/black/issues/3117 to be fixed.
    string_processing = auto()
    hug_parens_with_braces_and_square_brackets = auto()
    unify_docstring_detection = auto()
    no_normalize_fmt_skip_whitespace = auto()
    wrap_long_dict_values_in_parens = auto()
    multiline_string_handling = auto()
    typed_params_trailing_comma = auto()
    is_simple_lookup_for_doublestar_expression = auto()
    docstring_check_for_newline = auto()
    remove_redundant_guard_parens = auto()
    parens_for_long_if_clauses_in_case_block = auto()
    always_one_newline_after_import = auto()
    fix_fmt_skip_in_one_liners = auto()


UNSTABLE_FEATURES: Set[Preview] = {
UNSTABLE_FEATURES: set[Preview] = {
    # Many issues, see summary in https://github.com/psf/black/issues/4042
    Preview.string_processing,
    # See issues #3452 and #4158
    Preview.wrap_long_dict_values_in_parens,
    # See issue #4159
    Preview.multiline_string_handling,
    # See issue #4036 (crash), #4098, #4099 (proposed tweaks)
@@ -229,17 +225,17 @@ class Deprecated(UserWarning):

@dataclass
class Mode:
    target_versions: Set[TargetVersion] = field(default_factory=set)
    target_versions: set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    is_pyi: bool = False
    is_ipynb: bool = False
    skip_source_first_line: bool = False
    magic_trailing_comma: bool = True
    python_cell_magics: Set[str] = field(default_factory=set)
    python_cell_magics: set[str] = field(default_factory=set)
    preview: bool = False
    unstable: bool = False
    enabled_features: Set[Preview] = field(default_factory=set)
    enabled_features: set[Preview] = field(default_factory=set)

    def __contains__(self, feature: Preview) -> bool:
        """
@@ -285,6 +281,7 @@ def get_cache_key(self) -> str:
            str(int(self.skip_source_first_line)),
            str(int(self.magic_trailing_comma)),
            str(int(self.preview)),
            str(int(self.unstable)),
            features_and_magics,
        ]
        return ".".join(parts)

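get_cache_key joins every formatting-relevant Mode field into one dotted string, so two equivalent Modes hit the same cache entry while flipping any flag (including the newly added unstable component above) invalidates it. A sketch:

    assert Mode().get_cache_key() == Mode().get_cache_key()
    assert Mode(preview=True).get_cache_key() != Mode().get_cache_key()
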
@@ -3,18 +3,8 @@
"""

import sys
from typing import (
    Final,
    Generic,
    Iterator,
    List,
    Literal,
    Optional,
    Set,
    Tuple,
    TypeVar,
    Union,
)
from collections.abc import Iterator
from typing import Final, Generic, Literal, Optional, TypeVar, Union

if sys.version_info >= (3, 10):
    from typing import TypeGuard
@@ -24,7 +14,7 @@
from mypy_extensions import mypyc_attr

from black.cache import CACHE_DIR
from black.mode import Mode, Preview
from black.mode import Mode
from black.strings import get_string_prefix, has_triple_quotes
from blib2to3 import pygram
from blib2to3.pgen2 import token
@@ -254,9 +244,9 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:  # no
    elif (
        prevp.type == token.STAR
        and parent_type(prevp) == syms.star_expr
        and parent_type(prevp.parent) == syms.subscriptlist
        and parent_type(prevp.parent) in (syms.subscriptlist, syms.tname_star)
    ):
        # No space between typevar tuples.
        # No space between typevar tuples or unpacking them.
        return NO

    elif prevp.type in VARARGS_SPECIALS:
@@ -456,7 +446,7 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
    return None


def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
def prev_siblings_are(node: Optional[LN], tokens: list[Optional[NodeType]]) -> bool:
    """Return if the `node` and its previous siblings match types against the provided
    list of tokens; the provided `node`has its type matched against the last element in
    the list. `None` can be used as the first element to declare that the start of the
@@ -555,7 +545,7 @@ def is_arith_like(node: LN) -> bool:
}


def is_docstring(node: NL, mode: Mode) -> bool:
def is_docstring(node: NL) -> bool:
    if isinstance(node, Leaf):
        if node.type != token.STRING:
            return False
@@ -565,8 +555,7 @@ def is_docstring(node: NL, mode: Mode) -> bool:
        return False

    if (
        Preview.unify_docstring_detection in mode
        and node.parent
        node.parent
        and node.parent.type == syms.simple_stmt
        and not node.parent.prev_sibling
        and node.parent.parent
@@ -614,6 +603,17 @@ def is_one_tuple(node: LN) -> bool:
    )


def is_tuple(node: LN) -> bool:
    """Return True if `node` holds a tuple."""
    if node.type != syms.atom:
        return False
    gexp = unwrap_singleton_parenthesis(node)
    if gexp is None or gexp.type != syms.testlist_gexp:
        return False

    return True


def is_tuple_containing_walrus(node: LN) -> bool:
    """Return True if `node` holds a tuple that contains a walrus operator."""
    if node.type != syms.atom:
@@ -625,11 +625,33 @@ def is_tuple_containing_walrus(node: LN) -> bool:
    return any(child.type == syms.namedexpr_test for child in gexp.children)


def is_tuple_containing_star(node: LN) -> bool:
    """Return True if `node` holds a tuple that contains a star operator."""
    if node.type != syms.atom:
        return False
    gexp = unwrap_singleton_parenthesis(node)
    if gexp is None or gexp.type != syms.testlist_gexp:
        return False

    return any(child.type == syms.star_expr for child in gexp.children)


def is_generator(node: LN) -> bool:
    """Return True if `node` holds a generator."""
    if node.type != syms.atom:
        return False
    gexp = unwrap_singleton_parenthesis(node)
    if gexp is None or gexp.type != syms.testlist_gexp:
        return False

    return any(child.type == syms.old_comp_for for child in gexp.children)

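For orientation, the kinds of parenthesized atoms these predicates classify (the actual arguments are blib2to3 nodes; fragments shown here as source text):

    (a, b)             is_tuple                   -> True
    (x := f(), y)      is_tuple_containing_walrus -> True
    (*rest, last)      is_tuple_containing_star   -> True
    (x for x in xs)    is_generator               -> True

All of them unwrap a singleton-parenthesized atom first and then inspect the children of the inner testlist_gexp node.
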
def is_one_sequence_between(
    opening: Leaf,
    closing: Leaf,
    leaves: List[Leaf],
    brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
    leaves: list[Leaf],
    brackets: tuple[int, int] = (token.LPAR, token.RPAR),
) -> bool:
    """Return True if content between `opening` and `closing` is a one-sequence."""
    if (opening.type, closing.type) != brackets:
@@ -739,7 +761,7 @@ def is_yield(node: LN) -> bool:
    return False


def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
    """Return True if `leaf` is a star or double star in a vararg or kwarg.

    If `within` includes VARARGS_PARENTS, this applies to function signatures.
@@ -1006,6 +1028,7 @@ def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]:

def is_part_of_annotation(leaf: Leaf) -> bool:
    """Returns whether this leaf is part of a type annotation."""
    assert leaf.parent is not None
    return get_annotation_type(leaf) is not None


@@ -1035,3 +1058,21 @@ def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN:
    while node.parent and node.parent.children and node is node.parent.children[-1]:
        node = node.parent
    return node


def has_sibling_with_type(node: LN, type: int) -> bool:
    # Check previous siblings
    sibling = node.prev_sibling
    while sibling is not None:
        if sibling.type == type:
            return True
        sibling = sibling.prev_sibling

    # Check next siblings
    sibling = node.next_sibling
    while sibling is not None:
        if sibling.type == type:
            return True
        sibling = sibling.next_sibling

    return False

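has_sibling_with_type does a linear scan over the sibling chain, first to the left and then to the right, so a match is found regardless of which side it is on. maybe_make_parens_invisible_in_atom (further up in this diff) uses it to keep the parentheses around a tuple inside a multi-item with statement, where removing them would change the meaning, roughly:

    with (a, b), c:   # parentheses kept: the tuple atom has a COMMA sibling
        ...
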
@@ -6,7 +6,7 @@
import json
import re
import tempfile
from typing import Any, List, Optional
from typing import Any, Optional

from click import echo, style
from mypy_extensions import mypyc_attr
@@ -59,7 +59,7 @@ def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")


def _splitlines_no_ff(source: str) -> List[str]:
def _splitlines_no_ff(source: str) -> list[str]:
    """Split a string into lines ignoring form feed and other chars.

    This mimics how the Python parser splits source code.

@@ -5,7 +5,7 @@
import ast
import sys
import warnings
from typing import Iterable, Iterator, List, Set, Tuple
from collections.abc import Collection, Iterator

from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
from black.nodes import syms
@@ -21,7 +21,7 @@ class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts."""


def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [
@@ -52,12 +52,20 @@ def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
    return grammars


def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
def lib2to3_parse(
    src_txt: str, target_versions: Collection[TargetVersion] = ()
) -> Node:
    """Given a string with source, return the lib2to3 Node."""
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    grammars = get_grammars(set(target_versions))
    if target_versions:
        max_tv = max(target_versions, key=lambda tv: tv.value)
        tv_str = f" for target version {max_tv.pretty()}"
    else:
        tv_str = ""

    errors = {}
    for grammar in grammars:
        drv = driver.Driver(grammar)
@@ -73,14 +81,14 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
            except IndexError:
                faulty_line = "<line number missing in source>"
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {faulty_line}"
                f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
            )

        except TokenError as te:
            # In edge cases these are raised; and typically don't have a "faulty_line".
            lineno, column = te.args[1]
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
                f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}"
            )

        else:
@@ -115,7 +123,7 @@ class ASTSafetyError(Exception):


def _parse_single_version(
    src: str, version: Tuple[int, int], *, type_comments: bool
    src: str, version: tuple[int, int], *, type_comments: bool
) -> ast.AST:
    filename = "<unknown>"
    with warnings.catch_warnings():
@@ -151,7 +159,7 @@ def parse_ast(src: str) -> ast.AST:
def _normalize(lineend: str, value: str) -> str:
    # To normalize, we strip any leading and trailing space from
    # each line...
    stripped: List[str] = [i.strip() for i in value.splitlines()]
    stripped: list[str] = [i.strip() for i in value.splitlines()]
    normalized = lineend.join(stripped)
    # ...and remove any blank lines at the beginning and end of
    # the whole string
@@ -164,14 +172,14 @@ def stringify_ast(node: ast.AST) -> Iterator[str]:


def _stringify_ast_with_new_parent(
    node: ast.AST, parent_stack: List[ast.AST], new_parent: ast.AST
    node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
) -> Iterator[str]:
    parent_stack.append(new_parent)
    yield from _stringify_ast(node, parent_stack)
    parent_stack.pop()


def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)
@@ -205,7 +213,7 @@ def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
                    and isinstance(node, ast.Delete)
                    and isinstance(item, ast.Tuple)
                ):
                    for elt in item.elts:
                    for elt in _unwrap_tuples(item):
                        yield from _stringify_ast_with_new_parent(
                            elt, parent_stack, node
                        )
@@ -242,3 +250,11 @@ def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
        )

    yield f"{' ' * len(parent_stack)})  # /{node.__class__.__name__}"


def _unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]:
    for elt in node.elts:
        if isinstance(elt, ast.Tuple):
            yield from _unwrap_tuples(elt)
        else:
            yield elt

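_unwrap_tuples lets the AST-equivalence check treat nested and flat del targets alike. A quick standalone check (assumes the definition above):

    import ast

    tree = ast.parse("del (a, (b, (c, d)))")
    target = tree.body[0].targets[0]
    assert [elt.id for elt in _unwrap_tuples(target)] == ["a", "b", "c", "d"]
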
@@ -1,8 +1,9 @@
"""Functions related to Black's formatting by line ranges feature."""

import difflib
from collections.abc import Collection, Iterator, Sequence
from dataclasses import dataclass
from typing import Collection, Iterator, List, Sequence, Set, Tuple, Union
from typing import Union

from black.nodes import (
    LN,
@@ -18,8 +19,8 @@
from blib2to3.pgen2.token import ASYNC, NEWLINE


def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
    lines: List[Tuple[int, int]] = []
def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
    lines: list[tuple[int, int]] = []
    for lines_str in line_ranges:
        parts = lines_str.split("-")
        if len(parts) != 2:
@@ -40,14 +41,14 @@ def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
    return lines

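The accepted syntax for each --line-ranges value, per the parser above (a sketch):

    assert parse_line_ranges(["1-10", "21-21"]) == [(1, 10), (21, 21)]
    # Values without exactly one "-", such as "5" or "1-2-3", raise ValueError.
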
def is_valid_line_range(lines: Tuple[int, int]) -> bool:
def is_valid_line_range(lines: tuple[int, int]) -> bool:
    """Returns whether the line range is valid."""
    return not lines or lines[0] <= lines[1]


def sanitized_lines(
    lines: Collection[Tuple[int, int]], src_contents: str
) -> Collection[Tuple[int, int]]:
    lines: Collection[tuple[int, int]], src_contents: str
) -> Collection[tuple[int, int]]:
    """Returns the valid line ranges for the given source.

    This removes ranges that are entirely outside the valid lines.
@@ -74,10 +75,10 @@ def sanitized_lines(


def adjusted_lines(
    lines: Collection[Tuple[int, int]],
    lines: Collection[tuple[int, int]],
    original_source: str,
    modified_source: str,
) -> List[Tuple[int, int]]:
) -> list[tuple[int, int]]:
    """Returns the adjusted line ranges based on edits from the original code.

    This computes the new line ranges by diffing original_source and
@@ -153,7 +154,7 @@ def adjusted_lines(
    return new_lines


def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) -> None:
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
    """Converts unchanged lines to STANDALONE_COMMENT.

    The idea is similar to how `# fmt: on/off` is implemented. It also converts the
@@ -177,7 +178,7 @@ def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]])
    more formatting to pass (1). However, it's hard to get it correct when
    incorrect indentations are used. So we defer this to future optimizations.
    """
    lines_set: Set[int] = set()
    lines_set: set[int] = set()
    for start, end in lines:
        lines_set.update(range(start, end + 1))
    visitor = _TopLevelStatementsVisitor(lines_set)
@@ -205,7 +206,7 @@ class _TopLevelStatementsVisitor(Visitor[None]):
    classes/functions/statements.
    """

    def __init__(self, lines_set: Set[int]):
    def __init__(self, lines_set: set[int]):
        self._lines_set = lines_set

    def visit_simple_stmt(self, node: Node) -> Iterator[None]:
@@ -249,7 +250,7 @@ def visit_suite(self, node: Node) -> Iterator[None]:
        _convert_node_to_standalone_comment(semantic_parent)


def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
    """Converts unchanged to STANDALONE_COMMENT line by line."""
    for leaf in node.leaves():
        if leaf.type != NEWLINE:
@@ -261,7 +262,7 @@ def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
            # match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
            # Here we need to check `subject_expr`. The `case_block+` will be
            # checked by their own NEWLINEs.
            nodes_to_ignore: List[LN] = []
            nodes_to_ignore: list[LN] = []
            prev_sibling = leaf.prev_sibling
            while prev_sibling:
                nodes_to_ignore.insert(0, prev_sibling)
@@ -382,7 +383,7 @@ def _leaf_line_end(leaf: Leaf) -> int:
    return leaf.lineno + str(leaf).count("\n")


def _get_line_range(node_or_nodes: Union[LN, List[LN]]) -> Set[int]:
def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]:
    """Returns the line range of this node or list of nodes."""
    if isinstance(node_or_nodes, list):
        nodes = node_or_nodes
@@ -463,7 +464,7 @@ def _calculate_lines_mappings(
        modified_source.splitlines(keepends=True),
    )
    matching_blocks = matcher.get_matching_blocks()
    lines_mappings: List[_LinesMapping] = []
    lines_mappings: list[_LinesMapping] = []
    # matching_blocks is a sequence of "same block of code ranges", see
    # https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
    # Each block corresponds to a _LinesMapping with is_changed_block=False,

@@ -1,6 +1,6 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "https://github.com/psf/black/blob/main/black/resources/black.schema.json",
  "$id": "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json",
  "$comment": "tool.black table in pyproject.toml",
  "type": "object",
  "additionalProperties": false,
@@ -79,18 +79,12 @@
      "type": "array",
      "items": {
        "enum": [
          "hex_codes_in_unicode_sequences",
          "string_processing",
          "hug_parens_with_braces_and_square_brackets",
          "unify_docstring_detection",
          "no_normalize_fmt_skip_whitespace",
          "wrap_long_dict_values_in_parens",
          "multiline_string_handling",
          "typed_params_trailing_comma",
          "is_simple_lookup_for_doublestar_expression",
          "docstring_check_for_newline",
          "remove_redundant_guard_parens",
          "parens_for_long_if_clauses_in_case_block"
          "always_one_newline_after_import",
          "fix_fmt_skip_in_one_liners"
        ]
      },
      "description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features."

@@ -1,6 +1,5 @@
import importlib.resources
import json
import sys
from typing import Any


@@ -11,10 +10,6 @@ def get_schema(tool_name: str = "black") -> Any:
    pkg = "black.resources"
    fname = "black.schema.json"

    if sys.version_info < (3, 9):
        with importlib.resources.open_text(pkg, fname, encoding="utf-8") as f:
            return json.load(f)

    schema = importlib.resources.files(pkg).joinpath(fname)  # type: ignore[unreachable]
    schema = importlib.resources.files(pkg).joinpath(fname)
    with schema.open(encoding="utf-8") as f:
        return json.load(f)

@@ -5,7 +5,8 @@
import re
import sys
from functools import lru_cache
from typing import Final, List, Match, Pattern, Tuple
from re import Match, Pattern
from typing import Final

from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf
@@ -43,7 +44,7 @@ def has_triple_quotes(string: str) -> bool:
    return raw_string[:3] in {'"""', "'''"}


def lines_with_leading_tabs_expanded(s: str) -> List[str]:
def lines_with_leading_tabs_expanded(s: str) -> list[str]:
    """
    Splits string into lines and expands only leading tabs (following the normal
    Python rules)
@@ -62,10 +63,9 @@ def lines_with_leading_tabs_expanded(s: str) -> List[str]:
    return lines


def fix_docstring(docstring: str, prefix: str) -> str:
def fix_multiline_docstring(docstring: str, prefix: str) -> str:
    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    if not docstring:
        return ""
    assert docstring, "INTERNAL ERROR: Multiline docstrings cannot be empty"
    lines = lines_with_leading_tabs_expanded(docstring)
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
@@ -185,8 +185,7 @@ def normalize_string_quotes(s: str) -> str:
        orig_quote = "'"
        new_quote = '"'
    first_quote_pos = s.find(orig_quote)
    if first_quote_pos == -1:
        return s  # There's an internal error
    assert first_quote_pos != -1, f"INTERNAL ERROR: Malformed string {s!r}"

    prefix = s[:first_quote_pos]
    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
@@ -242,9 +241,9 @@ def normalize_string_quotes(s: str) -> str:

def normalize_fstring_quotes(
    quote: str,
    middles: List[Leaf],
    middles: list[Leaf],
    is_raw_fstring: bool,
) -> Tuple[List[Leaf], str]:
) -> tuple[list[Leaf], str]:
    """Prefer double quotes but only if it doesn't cause more escaping.

    Adds or removes backslashes as appropriate.

@ -5,31 +5,15 @@
|
||||
import re
|
||||
from abc import ABC, abstractmethod
|
||||
from collections import defaultdict
|
||||
from collections.abc import Callable, Collection, Iterable, Iterator, Sequence
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
ClassVar,
|
||||
Collection,
|
||||
Dict,
|
||||
Final,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
Literal,
|
||||
Optional,
|
||||
Sequence,
|
||||
Set,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
from typing import Any, ClassVar, Final, Literal, Optional, TypeVar, Union
|
||||
|
||||
from mypy_extensions import trait
|
||||
|
||||
from black.comments import contains_pragma_comment
|
||||
from black.lines import Line, append_leaves
|
||||
from black.mode import Feature, Mode, Preview
|
||||
from black.mode import Feature, Mode
|
||||
from black.nodes import (
|
||||
CLOSING_BRACKETS,
|
||||
OPENING_BRACKETS,
|
||||
@ -68,7 +52,7 @@ class CannotTransform(Exception):
|
||||
ParserState = int
|
||||
StringID = int
|
||||
TResult = Result[T, CannotTransform] # (T)ransform Result
|
||||
TMatchResult = TResult[List[Index]]
|
||||
TMatchResult = TResult[list[Index]]
|
||||
|
||||
SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops
|
||||
|
||||
@ -98,18 +82,12 @@ def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:
|
||||
# Brackets and parentheses indicate calls, subscripts, etc. ...
|
||||
# basically stuff that doesn't count as "simple". Only a NAME lookup
|
||||
# or dotted lookup (eg. NAME.NAME) is OK.
|
||||
if Preview.is_simple_lookup_for_doublestar_expression not in mode:
|
||||
return original_is_simple_lookup_func(line, index, kind)
|
||||
|
||||
if kind == -1:
|
||||
return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
|
||||
else:
|
||||
if kind == -1:
|
||||
return handle_is_simple_look_up_prev(
|
||||
line, index, {token.RPAR, token.RSQB}
|
||||
)
|
||||
else:
|
||||
return handle_is_simple_lookup_forward(
|
||||
line, index, {token.LPAR, token.LSQB}
|
||||
)
|
||||
return handle_is_simple_lookup_forward(
|
||||
line, index, {token.LPAR, token.LSQB}
|
||||
)
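The rewrite above drops the `Preview` gate and flattens the duplicated `if kind == -1` branches into a single one. The "simple lookup" check decides whether Black hugs the power operator; assuming a recent Black release, the observable effect is roughly:

```python
# Illustrative only: simple NAME/dotted operands hug "**",
# while calls and subscripts keep spaces around the operator.
import black

print(black.format_str("a = x ** 2 + cfg.scale ** 2", mode=black.Mode()), end="")
# a = x**2 + cfg.scale**2
print(black.format_str("b = f(x) ** 2 + arr[0] ** n", mode=black.Mode()), end="")
# b = f(x) ** 2 + arr[0] ** n
```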

def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
# An operand is considered "simple" if's a NAME, a numeric CONSTANT, a simple
@@ -155,31 +133,7 @@ def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
yield new_line


def original_is_simple_lookup_func(
line: Line, index: int, step: Literal[1, -1]
) -> bool:
if step == -1:
disallowed = {token.RPAR, token.RSQB}
else:
disallowed = {token.LPAR, token.LSQB}

while 0 <= index < len(line.leaves):
current = line.leaves[index]
if current.type in disallowed:
return False
if current.type not in {token.NAME, token.DOT} or current.value == "for":
# If the current token isn't disallowed, we'll assume this is
# simple as only the disallowed tokens are semantically
# attached to this lookup expression we're checking. Also,
# stop early if we hit the 'for' bit of a comprehension.
return True

index += step

return True


def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) -> bool:
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
"""
Handling the determination of is_simple_lookup for the lines prior to the doublestar
token. This is required because of the need to isolate the chained expression
@@ -202,7 +156,7 @@ def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int])


def handle_is_simple_lookup_forward(
line: Line, index: int, disallowed: Set[int]
line: Line, index: int, disallowed: set[int]
) -> bool:
"""
Handling decision is_simple_lookup for the lines behind the doublestar token.
@@ -227,7 +181,7 @@ def handle_is_simple_lookup_forward(
return True


def is_expression_chained(chained_leaves: List[Leaf]) -> bool:
def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
"""
Function to determine if the variable is a chained call.
(e.g., foo.lookup, foo().lookup, (foo.lookup())) will be recognized as chained call)
@@ -298,7 +252,7 @@ def do_match(self, line: Line) -> TMatchResult:

@abstractmethod
def do_transform(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
"""
Yields:
@@ -388,8 +342,8 @@ class CustomSplitMapMixin:
the resultant substrings go over the configured max line length.
"""

_Key: ClassVar = Tuple[StringID, str]
_CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
_Key: ClassVar = tuple[StringID, str]
_CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict(
tuple
)

@@ -413,7 +367,7 @@ def add_custom_splits(
key = self._get_key(string)
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

def pop_custom_splits(self, string: str) -> List[CustomSplit]:
def pop_custom_splits(self, string: str) -> list[CustomSplit]:
"""Custom Split Map Getter Method

Returns:
@@ -488,7 +442,7 @@ def do_match(self, line: Line) -> TMatchResult:
break
i += 1

if not is_part_of_annotation(leaf) and not contains_comment:
if not contains_comment and not is_part_of_annotation(leaf):
string_indices.append(idx)

# Advance to the next non-STRING leaf.
@@ -512,7 +466,7 @@ def do_match(self, line: Line) -> TMatchResult:
return TErr("This line has no strings that need merging.")

def do_transform(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
new_line = line

@@ -543,7 +497,7 @@ def do_transform(

@staticmethod
def _remove_backslash_line_continuation_chars(
line: Line, string_indices: List[int]
line: Line, string_indices: list[int]
) -> TResult[Line]:
"""
Merge strings that were split across multiple lines using
@@ -584,7 +538,7 @@ def _remove_backslash_line_continuation_chars(
return Ok(new_line)

def _merge_string_group(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> TResult[Line]:
"""
Merges string groups (i.e. set of adjacent strings).
@@ -603,7 +557,7 @@ def _merge_string_group(
is_valid_index = is_valid_index_factory(LL)

# A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {}
merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
for string_idx in string_indices:
vresult = self._validate_msg(line, string_idx)
if isinstance(vresult, Err):
@@ -639,8 +593,8 @@ def _merge_string_group(
return Ok(new_line)

def _merge_one_string_group(
self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
) -> Tuple[int, Leaf]:
self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
) -> tuple[int, Leaf]:
"""
Merges one string group where the first string in the group is
`LL[string_idx]`.
@@ -676,10 +630,10 @@ def make_naked(string: str, string_prefix: str) -> str:
"""
assert_is_leaf_string(string)
if "f" in string_prefix:
f_expressions = (
f_expressions = [
string[span[0] + 1 : span[1] - 1]  # +-1 to get rid of curly braces
for span in iter_fexpr_spans(string)
)
]
debug_expressions_contain_visible_quotes = any(
re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
for expression in f_expressions
@@ -810,6 +764,8 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
- The set of all string prefixes in the string group is of
length greater than one and is not equal to {"", "f"}.
- The string group consists of raw strings.
- The string group would merge f-strings with different quote types
and internal quotes.
- The string group is stringified type annotations. We don't want to
process stringified type annotations since pyright doesn't support
them spanning multiple string values. (NOTE: mypy, pytype, pyre do
@@ -836,6 +792,8 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:

i += inc

QUOTE = line.leaves[string_idx].value[-1]

num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
@@ -858,6 +816,19 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:

set_of_prefixes.add(prefix)

if (
"f" in prefix
and leaf.value[-1] != QUOTE
and (
"'" in leaf.value[len(prefix) + 1 : -1]
or '"' in leaf.value[len(prefix) + 1 : -1]
)
):
return TErr(
"StringMerger does NOT merge f-strings with different quote types"
" and internal quotes."
)

if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
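The new guard refuses to merge adjacent f-strings when they use different quote characters and carry quotes inside their expressions, since a merge would force re-quoting inside the expression parts. A sketch of input the merger now leaves alone (hypothetical example, not from the test suite):

```python
# Different quote types ('...' vs "...") plus quotes inside the
# f-string expressions: StringMerger skips this group entirely.
greeting = (
    f'He said "{"hello"}" to '
    f"everyone in {'the room'}"
)
```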
@@ -886,6 +857,7 @@ class StringParenStripper(StringTransformer):
The line contains a string which is surrounded by parentheses and:
- The target string is NOT the only argument to a function call.
- The target string is NOT a "pointless" string.
- The target string is NOT a dictionary value.
- If the target string contains a PERCENT, the brackets are not
preceded or followed by an operator with higher precedence than
PERCENT.
@@ -933,11 +905,14 @@ def do_match(self, line: Line) -> TMatchResult:
):
continue

# That LPAR should NOT be preceded by a function name or a closing
# bracket (which could be a function which returns a function or a
# list/dictionary that contains a function)...
# That LPAR should NOT be preceded by a colon (which could be a
# dictionary value), function name, or a closing bracket (which
# could be a function returning a function or a list/dictionary
# containing a function)...
if is_valid_index(idx - 2) and (
LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
LL[idx - 2].type == token.COLON
or LL[idx - 2].type == token.NAME
or LL[idx - 2].type in CLOSING_BRACKETS
):
continue

@@ -1004,11 +979,11 @@ def do_match(self, line: Line) -> TMatchResult:
return TErr("This line has no strings wrapped in parens.")

def do_transform(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves

string_and_rpar_indices: List[int] = []
string_and_rpar_indices: list[int] = []
for string_idx in string_indices:
string_parser = StringParser()
rpar_idx = string_parser.parse(LL, string_idx)
@@ -1031,7 +1006,7 @@ def do_transform(
)

def _transform_to_new_line(
self, line: Line, string_and_rpar_indices: List[int]
self, line: Line, string_and_rpar_indices: list[int]
) -> Line:
LL = line.leaves

@@ -1284,7 +1259,7 @@ def _get_max_string_length(self, line: Line, string_idx: int) -> int:
return max_string_length

@staticmethod
def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -1329,14 +1304,14 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
return None


def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
"""
Yields spans corresponding to expressions in a given f-string.
Spans are half-open ranges (left inclusive, right exclusive).
Assumes the input string is a valid f-string, but will not crash if the input
string is invalid.
"""
stack: List[int] = []  # our curly paren stack
stack: list[int] = []  # our curly paren stack
i = 0
while i < len(s):
if s[i] == "{":
@@ -1499,7 +1474,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
return Ok([string_idx])

def do_transform(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
assert len(string_indices) == 1, (
@@ -1601,7 +1576,7 @@ def more_splits_should_be_made() -> bool:
else:
return str_width(rest_value) > max_last_string_column()

string_line_results: List[Ok[Line]] = []
string_line_results: list[Ok[Line]] = []
while more_splits_should_be_made():
if use_custom_breakpoints:
# Custom User Split (manual)
@@ -1730,7 +1705,7 @@ def more_splits_should_be_made() -> bool:
last_line.comments = line.comments.copy()
yield Ok(last_line)

def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
"""
Yields:
All ranges of @string which, if @string were to be split there,
@@ -1761,7 +1736,7 @@ def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
yield begin, end

def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
"""
Yields:
All ranges of @string which, if @string were to be split there,
@@ -1772,8 +1747,8 @@ def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
return
yield from iter_fexpr_spans(string)

def _get_illegal_split_indices(self, string: str) -> Set[Index]:
illegal_indices: Set[Index] = set()
def _get_illegal_split_indices(self, string: str) -> set[Index]:
illegal_indices: set[Index] = set()
iterators = [
self._iter_fexpr_slices(string),
self._iter_nameescape_slices(string),
@@ -1899,7 +1874,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str:
else:
return string

def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
LL = list(leaves)

string_op_leaves = []
@@ -2008,7 +1983,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
return TErr("This line does not contain any non-atomic strings.")

@staticmethod
def _return_match(LL: List[Leaf]) -> Optional[int]:
def _return_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2033,7 +2008,7 @@ def _return_match(LL: List[Leaf]) -> Optional[int]:
return None

@staticmethod
def _else_match(LL: List[Leaf]) -> Optional[int]:
def _else_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2060,7 +2035,7 @@ def _else_match(LL: List[Leaf]) -> Optional[int]:
return None

@staticmethod
def _assert_match(LL: List[Leaf]) -> Optional[int]:
def _assert_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2095,7 +2070,7 @@ def _assert_match(LL: List[Leaf]) -> Optional[int]:
return None

@staticmethod
def _assign_match(LL: List[Leaf]) -> Optional[int]:
def _assign_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2142,7 +2117,7 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]:
return None

@staticmethod
def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2181,7 +2156,7 @@ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
return None

def do_transform(
self, line: Line, string_indices: List[int]
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
assert len(string_indices) == 1, (
@@ -2263,12 +2238,12 @@ def do_transform(
elif right_leaves and right_leaves[-1].type == token.RPAR:
# Special case for lambda expressions as dict's value, e.g.:
# my_dict = {
#    "key": lambda x: f"formatted: {x},
#    "key": lambda x: f"formatted: {x}",
# }
# After wrapping the dict's value with parentheses, the string is
# followed by a RPAR but its opening bracket is lambda's, not
# the string's:
#    "key": (lambda x: f"formatted: {x}),
#    "key": (lambda x: f"formatted: {x}"),
opening_bracket = right_leaves[-1].opening_bracket
if opening_bracket is not None and opening_bracket in left_leaves:
index = left_leaves.index(opening_bracket)
@@ -2347,7 +2322,7 @@ class StringParser:
DONE: Final = 8

# Lookup Table for Next State
_goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
_goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = {
# A string trailer may start with '.' OR '%'.
(START, token.DOT): DOT,
(START, token.PERCENT): PERCENT,
@@ -2376,7 +2351,7 @@ def __init__(self) -> None:
self._state = self.START
self._unmatched_lpars = 0

def parse(self, leaves: List[Leaf], string_idx: int) -> int:
def parse(self, leaves: list[Leaf], string_idx: int) -> int:
"""
Pre-conditions:
* @leaves[@string_idx].type == token.STRING

@@ -2,9 +2,8 @@
import logging
from concurrent.futures import Executor, ProcessPoolExecutor
from datetime import datetime, timezone
from functools import partial
from functools import cache, partial
from multiprocessing import freeze_support
from typing import Set, Tuple

try:
from aiohttp import web
@@ -86,12 +85,16 @@ def main(bind_host: str, bind_port: int) -> None:
web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)


@cache
def executor() -> Executor:
return ProcessPoolExecutor()


def make_app() -> web.Application:
app = web.Application(
middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))]
)
executor = ProcessPoolExecutor()
app.add_routes([web.post("/", partial(handle, executor=executor))])
app.add_routes([web.post("/", partial(handle, executor=executor()))])
return app
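The `@cache` change makes the process pool a lazily created singleton: `blackd` no longer builds a `ProcessPoolExecutor` while constructing the app, only on the first call that needs it, and every later call gets the same pool. The pattern in isolation:

```python
from concurrent.futures import Executor, ProcessPoolExecutor
from functools import cache

@cache
def executor() -> Executor:
    # Constructed once, on first call; memoized afterwards.
    return ProcessPoolExecutor()

if __name__ == "__main__":
    assert executor() is executor()  # same pool object every time
```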


@@ -191,7 +194,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:

preview = bool(headers.get(PREVIEW, False))
unstable = bool(headers.get(UNSTABLE, False))
enable_features: Set[black.Preview] = set()
enable_features: set[black.Preview] = set()
enable_unstable_features = headers.get(ENABLE_UNSTABLE_FEATURE, "").split(",")
for piece in enable_unstable_features:
piece = piece.strip()
@@ -216,7 +219,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:
)


def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
def parse_python_variant_header(value: str) -> tuple[bool, set[black.TargetVersion]]:
if value == "pyi":
return True, set()
else:

@@ -1,21 +1,11 @@
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar
from collections.abc import Awaitable, Callable, Iterable

from aiohttp.typedefs import Middleware
from aiohttp.web_middlewares import middleware
from aiohttp.web_request import Request
from aiohttp.web_response import StreamResponse

if TYPE_CHECKING:
F = TypeVar("F", bound=Callable[..., Any])
middleware: Callable[[F], F]
else:
try:
from aiohttp.web_middlewares import middleware
except ImportError:
# @middleware is deprecated and its behaviour is the default since aiohttp 4.0
# so if it doesn't exist anymore, define a no-op for forward compatibility.
middleware = lambda x: x  # noqa: E731

Handler = Callable[[Request], Awaitable[StreamResponse]]
Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]


def cors(allow_headers: Iterable[str]) -> Middleware:

@@ -12,9 +12,9 @@ file_input: (NEWLINE | stmt)* ENDMARKER
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
eval_input: testlist NEWLINE* ENDMARKER

typevar: NAME [':' expr] ['=' expr]
paramspec: '**' NAME ['=' expr]
typevartuple: '*' NAME ['=' (expr|star_expr)]
typevar: NAME [':' test] ['=' test]
paramspec: '**' NAME ['=' test]
typevartuple: '*' NAME ['=' (test|star_expr)]
typeparam: typevar | paramspec | typevartuple
typeparams: '[' typeparam (',' typeparam)* [','] ']'
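Changing the default clause from `expr` to `test` lets type-parameter defaults (PEP 696, Python 3.13) be any conditional-expression-level construct, matching what CPython accepts. A hedged sketch of source the grammar must now parse (requires Python 3.13; the conditional default is my example, not from the diff):

```python
class Box[T = int]:
    def __init__(self, item: T) -> None:
        self.item = item

# A `test` also covers conditional expressions as defaults.
class Pair[A = int, B = int if True else str]:
    pass
```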


@@ -21,13 +21,14 @@
import os
import pkgutil
import sys
from collections.abc import Iterable, Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from logging import Logger
from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
from typing import IO, Any, Optional, Union, cast

from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.tokenize import GoodTokenInfo
from blib2to3.pgen2.tokenize import TokenInfo
from blib2to3.pytree import NL

# Pgen imports
@@ -40,7 +41,7 @@
class ReleaseRange:
start: int
end: Optional[int] = None
tokens: List[Any] = field(default_factory=list)
tokens: list[Any] = field(default_factory=list)

def lock(self) -> None:
total_eaten = len(self.tokens)
@@ -51,7 +52,7 @@ class TokenProxy:
def __init__(self, generator: Any) -> None:
self._tokens = generator
self._counter = 0
self._release_ranges: List[ReleaseRange] = []
self._release_ranges: list[ReleaseRange] = []

@contextmanager
def release(self) -> Iterator["TokenProxy"]:
@@ -111,7 +112,7 @@ def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
logger = logging.getLogger(__name__)
self.logger = logger

def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
@@ -121,7 +122,7 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->

lineno = 1
column = 0
indent_columns: List[int] = []
indent_columns: list[int] = []
type = value = start = end = line_text = None
prefix = ""

@@ -179,31 +180,21 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->
assert p.rootnode is not None
return p.rootnode

def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)

def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)

def parse_file(
self, filename: Path, encoding: Optional[str] = None, debug: bool = False
) -> NL:
"""Parse a file and return the syntax tree."""
with open(filename, encoding=encoding) as stream:
return self.parse_stream(stream, debug)
text = stream.read()
return self.parse_string(text, debug)

def parse_string(self, text: str, debug: bool = False) -> NL:
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
tokens = tokenize.tokenize(text, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
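With `parse_stream_raw`/`parse_stream` gone, file input is read eagerly and funneled through `parse_string`, and the new `tokenize.tokenize(text, ...)` API takes a whole string instead of a `readline` callable. A rough driver usage sketch (grammar choice and setup are assumptions, based on Black's bundled blib2to3):

```python
from blib2to3 import pygram
from blib2to3.pgen2.driver import Driver

driver = Driver(pygram.python_grammar)
tree = driver.parse_string("x = 1\n")  # input must end with a newline
print(tree)
```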

def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
lines: List[str] = []
def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
lines: list[str] = []
current_line = ""
current_column = 0
wait_for_nl = False

@@ -16,15 +16,15 @@
import os
import pickle
import tempfile
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from typing import Any, Optional, TypeVar, Union

# Local imports
from . import token

_P = TypeVar("_P", bound="Grammar")
Label = Tuple[int, Optional[str]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
Label = tuple[int, Optional[str]]
DFA = list[list[tuple[int, int]]]
DFAS = tuple[DFA, dict[int, int]]
Path = Union[str, "os.PathLike[str]"]


@@ -83,16 +83,16 @@ class Grammar:
"""

def __init__(self) -> None:
self.symbol2number: Dict[str, int] = {}
self.number2symbol: Dict[int, str] = {}
self.states: List[DFA] = []
self.dfas: Dict[int, DFAS] = {}
self.labels: List[Label] = [(0, "EMPTY")]
self.keywords: Dict[str, int] = {}
self.soft_keywords: Dict[str, int] = {}
self.tokens: Dict[int, int] = {}
self.symbol2label: Dict[str, int] = {}
self.version: Tuple[int, int] = (0, 0)
self.symbol2number: dict[str, int] = {}
self.number2symbol: dict[int, str] = {}
self.states: list[DFA] = []
self.dfas: dict[int, DFAS] = {}
self.labels: list[Label] = [(0, "EMPTY")]
self.keywords: dict[str, int] = {}
self.soft_keywords: dict[str, int] = {}
self.tokens: dict[int, int] = {}
self.symbol2label: dict[str, int] = {}
self.version: tuple[int, int] = (0, 0)
self.start = 256
# Python 3.7+ parses async as a keyword, not an identifier
self.async_keywords = False
@@ -114,7 +114,7 @@ def dump(self, filename: Path) -> None:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
os.replace(f.name, filename)

def _update(self, attrs: Dict[str, Any]) -> None:
def _update(self, attrs: dict[str, Any]) -> None:
for k, v in attrs.items():
setattr(self, k, v)


@@ -4,9 +4,8 @@
"""Safely evaluate Python string literals without using eval()."""

import re
from typing import Dict, Match

simple_escapes: Dict[str, str] = {
simple_escapes: dict[str, str] = {
"a": "\a",
"b": "\b",
"f": "\f",
@@ -20,7 +19,7 @@
}


def escape(m: Match[str]) -> str:
def escape(m: re.Match[str]) -> str:
all, tail = m.group(0, 1)
assert all.startswith("\\")
esc = simple_escapes.get(tail)
@@ -29,16 +28,16 @@ def escape(m: Match[str]) -> str:
if tail.startswith("x"):
hexes = tail[1:]
if len(hexes) < 2:
raise ValueError("invalid hex string escape ('\\%s')" % tail)
raise ValueError(f"invalid hex string escape ('\\{tail}')")
try:
i = int(hexes, 16)
except ValueError:
raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
raise ValueError(f"invalid hex string escape ('\\{tail}')") from None
else:
try:
i = int(tail, 8)
except ValueError:
raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
raise ValueError(f"invalid octal string escape ('\\{tail}')") from None
return chr(i)
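`escape` is written as a callback for `re.sub`: the regex captures what follows a backslash, simple escapes go through the table, and `\xNN`/octal forms are decoded by hand. A self-contained sketch of the same mechanism (regex simplified relative to the real module):

```python
import re

simple_escapes: dict[str, str] = {"n": "\n", "t": "\t", "\\": "\\", '"': '"'}

def escape(m: re.Match[str]) -> str:
    tail = m.group(1)
    if tail in simple_escapes:
        return simple_escapes[tail]
    if tail.startswith("x"):          # \xNN hex escape
        return chr(int(tail[1:], 16))
    return chr(int(tail, 8))          # octal escape, e.g. \101

def evalstring(body: str) -> str:
    # Simplified: the real module also handles \u escapes and raw strings.
    return re.sub(r"\\(x[0-9a-fA-F]{2}|[0-7]{1,3}|.)", escape, body)

print(evalstring(r"col1\tcol2\x21\101"))  # col1<TAB>col2!A
```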


@@ -9,20 +9,9 @@
how this parsing engine works.

"""
from collections.abc import Callable, Iterator
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from typing import TYPE_CHECKING, Any, Optional, Union, cast

from blib2to3.pgen2.grammar import Grammar
from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
@@ -34,10 +23,10 @@
from blib2to3.pgen2.driver import TokenProxy


Results = Dict[str, NL]
Results = dict[str, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
DFA = list[list[tuple[int, int]]]
DFAS = tuple[DFA, dict[int, int]]


def lam_sub(grammar: Grammar, node: RawNode) -> NL:
@@ -50,24 +39,24 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:


def stack_copy(
stack: List[Tuple[DFAS, int, RawNode]],
) -> List[Tuple[DFAS, int, RawNode]]:
stack: list[tuple[DFAS, int, RawNode]],
) -> list[tuple[DFAS, int, RawNode]]:
"""Nodeless stack copy."""
return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]


class Recorder:
def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
self.parser = parser
self._ilabels = ilabels
self.context = context  # not really matter

self._dead_ilabels: Set[int] = set()
self._dead_ilabels: set[int] = set()
self._start_point = self.parser.stack
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}

@property
def ilabels(self) -> Set[int]:
def ilabels(self) -> set[int]:
return self._dead_ilabels.symmetric_difference(self._ilabels)

@contextmanager
@@ -100,18 +89,12 @@ def backtrack(self) -> Iterator[None]:
self.parser.is_backtracking = is_backtracking

def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
else:
func = self.parser.addtoken

for ilabel in self.ilabels:
with self.switch_to(ilabel):
args = [tok_type, tok_val, self.context]
if raw:
args.insert(0, ilabel)
func(*args)
self.parser._addtoken(ilabel, tok_type, tok_val, self.context)
else:
self.parser.addtoken(tok_type, tok_val, self.context)

def determine_route(
self, value: Optional[str] = None, force: bool = False
@@ -233,9 +216,9 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
# where children is a list of nodes or None, and context may be None.
newnode: RawNode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
self.rootnode: Optional[NL] = None
self.used_names: Set[str] = set()
self.used_names: set[str] = set()
self.proxy = proxy
self.last_token = None

@@ -333,7 +316,7 @@ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> boo
# No success finding a transition
raise ParseError("bad input", type, value, context)

def classify(self, type: int, value: str, context: Context) -> List[int]:
def classify(self, type: int, value: str, context: Context) -> list[int]:
"""Turn a token into a label. (Internal)

Depending on whether the value is a soft-keyword or not,

@@ -2,21 +2,11 @@
# Licensed to PSF under a Contributor Agreement.

import os
from typing import (
IO,
Any,
Dict,
Iterator,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
)
from collections.abc import Iterator, Sequence
from typing import IO, Any, NoReturn, Optional, Union

from blib2to3.pgen2 import grammar, token, tokenize
from blib2to3.pgen2.tokenize import GoodTokenInfo
from blib2to3.pgen2.tokenize import TokenInfo

Path = Union[str, "os.PathLike[str]"]

@@ -28,8 +18,8 @@ class PgenGrammar(grammar.Grammar):
class ParserGenerator:
filename: Path
stream: IO[str]
generator: Iterator[GoodTokenInfo]
first: Dict[str, Optional[Dict[str, int]]]
generator: Iterator[TokenInfo]
first: dict[str, Optional[dict[str, int]]]

def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
close_stream = None
@@ -37,8 +27,7 @@ def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
stream = open(filename, encoding="utf-8")
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.generator = tokenize.tokenize(stream.read())
self.gettoken()  # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
@@ -71,7 +60,7 @@ def make_grammar(self) -> PgenGrammar:
c.start = c.symbol2number[self.startsymbol]
return c

def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]:
rawfirst = self.first[name]
assert rawfirst is not None
first = {}
@@ -144,14 +133,14 @@ def calcfirst(self, name: str) -> None:
dfa = self.dfas[name]
self.first[name] = None  # dummy to detect left recursion
state = dfa[0]
totalset: Dict[str, int] = {}
totalset: dict[str, int] = {}
overlapcheck = {}
for label in state.arcs:
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
raise ValueError(f"recursion for rule {name!r}")
else:
self.calcfirst(label)
fset = self.first[label]
@@ -161,18 +150,18 @@ def calcfirst(self, name: str) -> None:
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse: Dict[str, str] = {}
inverse: dict[str, str] = {}
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError(
"rule %s is ambiguous; %s is in the first sets of %s as well"
" as %s" % (name, symbol, label, inverse[symbol])
f"rule {name} is ambiguous; {symbol} is in the first sets of"
f" {label} as well as {inverse[symbol]}"
)
inverse[symbol] = label
self.first[name] = totalset

def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
def parse(self) -> tuple[dict[str, list["DFAState"]], str]:
dfas = {}
startsymbol: Optional[str] = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
@@ -197,7 +186,7 @@ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
assert startsymbol is not None
return dfas, startsymbol

def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
def make_dfa(self, start: "NFAState", finish: "NFAState") -> list["DFAState"]:
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
@@ -205,12 +194,12 @@ def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)

def closure(state: NFAState) -> Dict[NFAState, int]:
base: Dict[NFAState, int] = {}
def closure(state: NFAState) -> dict[NFAState, int]:
base: dict[NFAState, int] = {}
addclosure(state, base)
return base

def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
def addclosure(state: NFAState, base: dict[NFAState, int]) -> None:
assert isinstance(state, NFAState)
if state in base:
return
@@ -221,7 +210,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:

states = [DFAState(closure(start), finish)]
for state in states:  # NB states grows while we're iterating
arcs: Dict[str, Dict[NFAState, int]] = {}
arcs: dict[str, dict[NFAState, int]] = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
@@ -248,18 +237,18 @@ def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
j = len(todo)
todo.append(next)
if label is None:
print("    -> %d" % j)
print(f"    -> {j}")
else:
print("    %s -> %d" % (label, j))
print(f"    {label} -> {j}")

def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print("  State", i, state.isfinal and "(final)" or "")
for label, next in sorted(state.arcs.items()):
print("    %s -> %d" % (label, dfa.index(next)))
print(f"    {label} -> {dfa.index(next)}")

def simplify_dfa(self, dfa: List["DFAState"]) -> None:
def simplify_dfa(self, dfa: list["DFAState"]) -> None:
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
@@ -280,7 +269,7 @@ def simplify_dfa(self, dfa: List["DFAState"]) -> None:
changes = True
break

def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
def parse_rhs(self) -> tuple["NFAState", "NFAState"]:
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
@@ -297,7 +286,7 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
z.addarc(zz)
return aa, zz

def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
def parse_alt(self) -> tuple["NFAState", "NFAState"]:
# ALT: ITEM+
a, b = self.parse_item()
while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
@@ -306,7 +295,7 @@ def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
b = d
return a, b

def parse_item(self) -> Tuple["NFAState", "NFAState"]:
def parse_item(self) -> tuple["NFAState", "NFAState"]:
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
@@ -326,7 +315,7 @@ def parse_item(self) -> Tuple["NFAState", "NFAState"]:
else:
return a, a

def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
def parse_atom(self) -> tuple["NFAState", "NFAState"]:
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
@@ -341,15 +330,12 @@ def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
return a, z
else:
self.raise_error(
"expected (...) or NAME or STRING, got %s/%s", self.type, self.value
f"expected (...) or NAME or STRING, got {self.type}/{self.value}"
)
raise AssertionError

def expect(self, type: int, value: Optional[Any] = None) -> str:
if self.type != type or (value is not None and self.value != value):
self.raise_error(
"expected %s/%s, got %s/%s", type, value, self.type, self.value
)
self.raise_error(f"expected {type}/{value}, got {self.type}/{self.value}")
value = self.value
self.gettoken()
return value
@@ -361,17 +347,14 @@ def gettoken(self) -> None:
self.type, self.value, self.begin, self.end, self.line = tup
# print token.tok_name[self.type], repr(self.value)

def raise_error(self, msg: str, *args: Any) -> NoReturn:
if args:
try:
msg = msg % args
except Exception:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
def raise_error(self, msg: str) -> NoReturn:
raise SyntaxError(
msg, (str(self.filename), self.end[0], self.end[1], self.line)
)


class NFAState:
arcs: List[Tuple[Optional[str], "NFAState"]]
arcs: list[tuple[Optional[str], "NFAState"]]

def __init__(self) -> None:
self.arcs = []  # list of (label, NFAState) pairs
@@ -383,11 +366,11 @@ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:


class DFAState:
nfaset: Dict[NFAState, Any]
nfaset: dict[NFAState, Any]
isfinal: bool
arcs: Dict[str, "DFAState"]
arcs: dict[str, "DFAState"]

def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None:
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)

@@ -1,6 +1,6 @@
"""Token constants (from "token.h")."""

from typing import Dict, Final
from typing import Final

# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
@@ -74,7 +74,7 @@
NT_OFFSET: Final = 256
# --end constants--

tok_name: Final[Dict[int, str]] = {}
tok_name: Final[dict[int, str]] = {}
for _name, _value in list(globals().items()):
if type(_value) is int:
tok_name[_value] = _name

(One file's diff suppressed because it is too large.)

@@ -12,18 +12,8 @@

# mypy: allow-untyped-defs, allow-incomplete-defs

from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from collections.abc import Iterable, Iterator
from typing import Any, Optional, TypeVar, Union

from blib2to3.pgen2.grammar import Grammar

@@ -34,7 +24,7 @@

HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max

_type_reprs: Dict[int, Union[str, int]] = {}
_type_reprs: dict[int, Union[str, int]] = {}


def type_repr(type_num: int) -> Union[str, int]:
@@ -57,8 +47,8 @@ def type_repr(type_num: int) -> Union[str, int]:
_P = TypeVar("_P", bound="Base")

NL = Union["Node", "Leaf"]
Context = Tuple[str, Tuple[int, int]]
RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
Context = tuple[str, tuple[int, int]]
RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]


class Base:
@@ -74,7 +64,7 @@ class Base:
# Default values for instance variables
type: int  # int: token number (< 256) or symbol number (>= 256)
parent: Optional["Node"] = None  # Parent node pointer, or None
children: List[NL]  # List of subnodes
children: list[NL]  # List of subnodes
was_changed: bool = False
was_checked: bool = False

@@ -135,7 +125,7 @@ def pre_order(self) -> Iterator[NL]:
"""
raise NotImplementedError

def replace(self, new: Union[NL, List[NL]]) -> None:
def replace(self, new: Union[NL, list[NL]]) -> None:
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
@@ -242,16 +232,16 @@ def get_suffix(self) -> str:
class Node(Base):
"""Concrete implementation for interior nodes."""

fixers_applied: Optional[List[Any]]
used_names: Optional[Set[str]]
fixers_applied: Optional[list[Any]]
used_names: Optional[set[str]]

def __init__(
self,
type: int,
children: List[NL],
children: list[NL],
context: Optional[Any] = None,
prefix: Optional[str] = None,
fixers_applied: Optional[List[Any]] = None,
fixers_applied: Optional[list[Any]] = None,
) -> None:
"""
Initializer.
@@ -278,11 +268,7 @@ def __init__(
def __repr__(self) -> str:
"""Return a canonical string representation."""
assert self.type is not None
return "{}({}, {!r})".format(
self.__class__.__name__,
type_repr(self.type),
self.children,
)
return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})"

def __str__(self) -> str:
"""
@@ -363,12 +349,12 @@ def append_child(self, child: NL) -> None:
self.invalidate_sibling_maps()

def invalidate_sibling_maps(self) -> None:
self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
self.prev_sibling_map: Optional[dict[int, Optional[NL]]] = None
self.next_sibling_map: Optional[dict[int, Optional[NL]]] = None

def update_sibling_maps(self) -> None:
_prev: Dict[int, Optional[NL]] = {}
_next: Dict[int, Optional[NL]] = {}
_prev: dict[int, Optional[NL]] = {}
_next: dict[int, Optional[NL]] = {}
self.prev_sibling_map = _prev
self.next_sibling_map = _next
previous: Optional[NL] = None
@@ -384,11 +370,11 @@ class Leaf(Base):

# Default values for instance variables
value: str
fixers_applied: List[Any]
fixers_applied: list[Any]
bracket_depth: int
# Changed later in brackets.py
opening_bracket: Optional["Leaf"] = None
used_names: Optional[Set[str]]
used_names: Optional[set[str]]
_prefix = ""  # Whitespace and comments preceding this token in the input
lineno: int = 0  # Line where this token starts in the input
column: int = 0  # Column where this token starts in the input
@@ -403,7 +389,7 @@ def __init__(
value: str,
context: Optional[Context] = None,
prefix: Optional[str] = None,
fixers_applied: List[Any] = [],
fixers_applied: list[Any] = [],
opening_bracket: Optional["Leaf"] = None,
fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
) -> None:
@@ -421,7 +407,7 @@ def __init__(
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
self.fixers_applied: Optional[list[Any]] = fixers_applied[:]
self.children = []
self.opening_bracket = opening_bracket
self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
@@ -431,10 +417,9 @@ def __repr__(self) -> str:
from .pgen2.token import tok_name

assert self.type is not None
return "{}({}, {!r})".format(
self.__class__.__name__,
tok_name.get(self.type, self.type),
self.value,
return (
f"{self.__class__.__name__}({tok_name.get(self.type, self.type)},"
f" {self.value!r})"
)

def __str__(self) -> str:
@@ -503,7 +488,7 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
return Leaf(type, value or "", context=context)


_Results = Dict[str, NL]
_Results = dict[str, NL]


class BasePattern:
@@ -537,7 +522,7 @@ def __repr__(self) -> str:
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args)))
return f"{self.__class__.__name__}({', '.join(map(repr, args))})"

def _submatch(self, node, results=None) -> bool:
raise NotImplementedError
@@ -576,7 +561,7 @@ def match(self, node: NL, results: Optional[_Results] = None) -> bool:
results[self.name] = node
return True

def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
def match_seq(self, nodes: list[NL], results: Optional[_Results] = None) -> bool:
"""
Does this pattern exactly match a sequence of nodes?

@@ -586,7 +571,7 @@ def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool
return False
return self.match(nodes[0], results)

def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
"""
Generator yielding all matches for this pattern.

@@ -816,7 +801,7 @@ def match_seq(self, nodes, results=None) -> bool:
return True
return False

def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
"""
Generator yielding matches for a sequence of nodes.

@@ -861,7 +846,7 @@ def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr

def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
@@ -890,7 +875,7 @@ def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
new_results.append((c0 + c1, r))
results = new_results

def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
"""Special optimized matcher for bare_name."""
count = 0
r = {}  # type: _Results
@@ -907,7 +892,7 @@ def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
r[self.name] = nodes[:count]
return count, r

def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
@@ -944,7 +929,7 @@ def match_seq(self, nodes, results=None) -> bool:
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0

def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
@@ -957,8 +942,8 @@ def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:


def generate_matches(
patterns: List[BasePattern], nodes: List[NL]
) -> Iterator[Tuple[int, _Results]]:
patterns: list[BasePattern], nodes: list[NL]
) -> Iterator[tuple[int, _Results]]:
"""
Generator yielding matches for a sequence of patterns and nodes.


tests/data/cases/annotations.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# regression test for #1765
class Foo:
def foo(self):
if True:
content_ids: Mapping[
str, Optional[ContentId]
] = self.publisher_content_store.store_config_contents(files)

# output

# regression test for #1765
class Foo:
def foo(self):
if True:
content_ids: Mapping[str, Optional[ContentId]] = (
self.publisher_content_store.store_config_contents(files)
)
24
tests/data/cases/backslash_before_indent.py
Normal file
24
tests/data/cases/backslash_before_indent.py
Normal file
@ -0,0 +1,24 @@
|
||||
# flags: --minimum-version=3.10
|
||||
class Plotter:
|
||||
\
|
||||
pass
|
||||
|
||||
class AnotherCase:
|
||||
\
|
||||
"""Some
|
||||
\
|
||||
Docstring
|
||||
"""
|
||||
|
||||
# output
|
||||
|
||||
class Plotter:
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class AnotherCase:
|
||||
"""Some
|
||||
\
|
||||
Docstring
|
||||
"""
|
@ -1,4 +1,3 @@
|
||||
# flags: --preview
|
||||
# long variable name
|
||||
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
|
||||
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment
|
||||
@ -32,7 +31,8 @@
|
||||
raise ValueError(err.format(key))
|
||||
concatenated_strings = "some strings that are " "concatenated implicitly, so if you put them on separate " "lines it will fit"
|
||||
del concatenated_strings, string_variable_name, normal_function_name, normal_name, need_more_to_make_the_line_long_enough
|
||||
|
||||
del ([], name_1, name_2), [(), [], name_4, name_3], name_1[[name_2 for name_1 in name_0]]
|
||||
del (),
|
||||
|
||||
# output
|
||||
|
||||
@ -92,3 +92,9 @@
|
||||
normal_name,
|
||||
need_more_to_make_the_line_long_enough,
|
||||
)
|
||||
del (
|
||||
([], name_1, name_2),
|
||||
[(), [], name_4, name_3],
|
||||
name_1[[name_2 for name_1 in name_0]],
|
||||
)
|
||||
del ((),)
|
@ -14,5 +14,7 @@ def bob(): # pylint: disable=W9016
|
||||
pass
|
||||
|
||||
|
||||
def bobtwo(): # some comment here
|
||||
def bobtwo():
|
||||
|
||||
# some comment here
|
||||
pass
|
||||
|
@ -1,4 +1,3 @@
|
||||
# flags: --minimum-version=3.8
|
||||
with \
|
||||
make_context_manager1() as cm1, \
|
||||
make_context_manager2() as cm2, \
|
||||
|
@ -1,4 +1,3 @@
|
||||
# flags: --minimum-version=3.9
|
||||
with \
|
||||
make_context_manager1() as cm1, \
|
||||
make_context_manager2() as cm2, \
|
||||
@ -85,6 +84,31 @@ async def func():
|
||||
pass
|
||||
|
||||
|
||||
|
||||
# don't remove the brackets here, it changes the meaning of the code.
|
||||
with (x, y) as z:
|
||||
pass
|
||||
|
||||
|
||||
# don't remove the brackets here, it changes the meaning of the code.
|
||||
# even though the code will always trigger a runtime error
|
||||
with (name_5, name_4), name_5:
|
||||
pass
|
||||
|
||||
|
||||
def test_tuple_as_contextmanager():
|
||||
from contextlib import nullcontext
|
||||
|
||||
try:
|
||||
with (nullcontext(),nullcontext()),nullcontext():
|
||||
pass
|
||||
except TypeError:
|
||||
# test passed
|
||||
pass
|
||||
else:
|
||||
# this should be a type error
|
||||
assert False
|
||||
|
||||
# output
|
||||
|
||||
|
||||
@ -173,3 +197,28 @@ async def func():
|
||||
some_other_function(argument1, argument2, argument3="some_value"),
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
# don't remove the brackets here, it changes the meaning of the code.
|
||||
with (x, y) as z:
|
||||
pass
|
||||
|
||||
|
||||
# don't remove the brackets here, it changes the meaning of the code.
|
||||
# even though the code will always trigger a runtime error
|
||||
with (name_5, name_4), name_5:
|
||||
pass
|
||||
|
||||
|
||||
def test_tuple_as_contextmanager():
|
||||
from contextlib import nullcontext
|
||||
|
||||
try:
|
||||
with (nullcontext(), nullcontext()), nullcontext():
|
||||
pass
|
||||
except TypeError:
|
||||
# test passed
|
||||
pass
|
||||
else:
|
||||
# this should be a type error
|
||||
assert False
|
||||
|
@ -1,4 +1,3 @@
|
||||
# flags: --minimum-version=3.9
|
||||
# This file uses parenthesized context managers introduced in Python 3.9.
|
||||
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
# flags: --preview
|
||||
"""
|
||||
87 characters ............................................................................
|
||||
"""
|
13
tests/data/cases/fmtonoff6.py
Normal file
13
tests/data/cases/fmtonoff6.py
Normal file
@ -0,0 +1,13 @@
|
||||
# Regression test for https://github.com/psf/black/issues/2478.
|
||||
def foo():
|
||||
arr = (
|
||||
(3833567325051000, 5, 1, 2, 4229.25, 6, 0),
|
||||
# fmt: off
|
||||
)
|
||||
|
||||
|
||||
# Regression test for https://github.com/psf/black/issues/3458.
|
||||
dependencies = {
|
||||
a: b,
|
||||
# fmt: off
|
||||
}
|
9
tests/data/cases/fmtskip10.py
Normal file
9
tests/data/cases/fmtskip10.py
Normal file
@ -0,0 +1,9 @@
|
||||
# flags: --preview
def foo(): return "mock" # fmt: skip
if True: print("yay") # fmt: skip
for i in range(10): print(i) # fmt: skip

j = 1 # fmt: skip
while j < 10: j += 1 # fmt: skip

b = [c for c in "A very long string that would normally generate some kind of collapse, since it is this long"] # fmt: skip
tests/data/cases/fmtskip11.py (new file, 6 lines)
@@ -0,0 +1,6 @@
def foo():
    pass


# comment 1 # fmt: skip
# comment 2
@@ -1,4 +1,3 @@
# flags: --preview
print () # fmt: skip
print () # fmt:skip
@@ -156,6 +156,7 @@ def something(self):

    #


    #
    pass
@@ -1,4 +1,3 @@
# flags: --preview
x = "\x1F"
x = "\\x1B"
x = "\\\x1B"
tests/data/cases/fstring_quotations.py (new file, 67 lines)
@@ -0,0 +1,67 @@
# Regression tests for long f-strings, including examples from issue #3623

a = (
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
)

a = (
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
)

a = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + \
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'

a = f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"' + \
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'

a = (
    f'bbbbbbb"{"b"}"'
    'aaaaaaaa'
)

a = (
    f'"{"b"}"'
)

a = (
    f'\"{"b"}\"'
)

a = (
    r'\"{"b"}\"'
)

# output

# Regression tests for long f-strings, including examples from issue #3623

a = (
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
)

a = (
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
)

a = (
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    + f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
)

a = (
    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
    + f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
)

a = f'bbbbbbb"{"b"}"' "aaaaaaaa"

a = f'"{"b"}"'

a = f'"{"b"}"'

a = r'\"{"b"}\"'
@@ -1,4 +1,4 @@
# flags: --preview --minimum-version=3.10
# flags: --minimum-version=3.10
# normal, short, function definition
def foo(a, b) -> tuple[int, float]: ...

@@ -142,6 +142,7 @@ def SimplePyFn(
    Buffer[UInt8, 2],
    Buffer[UInt8, 2],
]: ...

# output
# normal, short, function definition
def foo(a, b) -> tuple[int, float]: ...
@@ -60,6 +60,64 @@ def func() -> ((also_super_long_type_annotation_that_may_cause_an_AST_related_cr
    argument1, (one, two,), argument4, argument5, argument6
)

def foo() -> (
    # comment inside parenthesised return type
    int
):
    ...

def foo() -> (
    # comment inside parenthesised return type
    # more
    int
    # another
):
    ...

def foo() -> (
    # comment inside parenthesised new union return type
    int | str | bytes
):
    ...

def foo() -> (
    # comment inside plain tuple
):
    pass

def foo(arg: (# comment with non-return annotation
    int
    # comment with non-return annotation
)):
    pass

def foo(arg: (# comment with non-return annotation
    int | range | memoryview
    # comment with non-return annotation
)):
    pass

def foo(arg: (# only before
    int
)):
    pass

def foo(arg: (
    int
    # only after
)):
    pass

variable: ( # annotation
    because
    # why not
)

variable: (
    because
    # why not
)

# output

def f(
@@ -176,3 +234,75 @@ def func() -> (
    argument5,
    argument6,
)


def foo() -> (
    # comment inside parenthesised return type
    int
): ...


def foo() -> (
    # comment inside parenthesised return type
    # more
    int
    # another
): ...


def foo() -> (
    # comment inside parenthesised new union return type
    int
    | str
    | bytes
): ...


def foo() -> (
    # comment inside plain tuple
):
    pass


def foo(
    arg: (  # comment with non-return annotation
        int
        # comment with non-return annotation
    ),
):
    pass


def foo(
    arg: (  # comment with non-return annotation
        int
        | range
        | memoryview
        # comment with non-return annotation
    ),
):
    pass


def foo(arg: int):  # only before
    pass


def foo(
    arg: (
        int
        # only after
    ),
):
    pass


variable: (  # annotation
    because
    # why not
)

variable: (
    because
    # why not
)
tests/data/cases/generics_wrapping.py (new file, 307 lines)
@@ -0,0 +1,307 @@
# flags: --minimum-version=3.12
def plain[T, B](a: T, b: T) -> T:
    return a

def arg_magic[T, B](a: T, b: T,) -> T:
    return a

def type_param_magic[T, B,](a: T, b: T) -> T:
    return a

def both_magic[T, B,](a: T, b: T,) -> T:
    return a


def plain_multiline[
    T,
    B
](
    a: T,
    b: T
) -> T:
    return a

def arg_magic_multiline[
    T,
    B
](
    a: T,
    b: T,
) -> T:
    return a

def type_param_magic_multiline[
    T,
    B,
](
    a: T,
    b: T
) -> T:
    return a

def both_magic_multiline[
    T,
    B,
](
    a: T,
    b: T,
) -> T:
    return a


def plain_mixed1[
    T,
    B
](a: T, b: T) -> T:
    return a

def plain_mixed2[T, B](
    a: T,
    b: T
) -> T:
    return a

def arg_magic_mixed1[
    T,
    B
](a: T, b: T,) -> T:
    return a

def arg_magic_mixed2[T, B](
    a: T,
    b: T,
) -> T:
    return a

def type_param_magic_mixed1[
    T,
    B,
](a: T, b: T) -> T:
    return a

def type_param_magic_mixed2[T, B,](
    a: T,
    b: T
) -> T:
    return a

def both_magic_mixed1[
    T,
    B,
](a: T, b: T,) -> T:
    return a

def both_magic_mixed2[T, B,](
    a: T,
    b: T,
) -> T:
    return a

def something_something_function[
    T: Model
](param: list[int], other_param: type[T], *, some_other_param: bool = True) -> QuerySet[
    T
]:
    pass


def func[A_LOT_OF_GENERIC_TYPES: AreBeingDefinedHere, LIKE_THIS, AND_THIS, ANOTHER_ONE, AND_YET_ANOTHER_ONE: ThisOneHasTyping](a: T, b: T, c: T, d: T, e: T, f: T, g: T, h: T, i: T, j: T, k: T, l: T, m: T, n: T, o: T, p: T) -> T:
    return a


def with_random_comments[
    Z
    # bye
]():
    return a


def func[
    T, # comment
    U # comment
    ,
    Z: # comment
    int
](): pass


def func[
    T, # comment but it's long so it doesn't just move to the end of the line
    U # comment comment comm comm ent ent
    ,
    Z: # comment ent ent comm comm comment
    int
](): pass


# output
def plain[T, B](a: T, b: T) -> T:
    return a


def arg_magic[T, B](
    a: T,
    b: T,
) -> T:
    return a


def type_param_magic[
    T,
    B,
](
    a: T, b: T
) -> T:
    return a


def both_magic[
    T,
    B,
](
    a: T,
    b: T,
) -> T:
    return a


def plain_multiline[T, B](a: T, b: T) -> T:
    return a


def arg_magic_multiline[T, B](
    a: T,
    b: T,
) -> T:
    return a


def type_param_magic_multiline[
    T,
    B,
](
    a: T, b: T
) -> T:
    return a


def both_magic_multiline[
    T,
    B,
](
    a: T,
    b: T,
) -> T:
    return a


def plain_mixed1[T, B](a: T, b: T) -> T:
    return a


def plain_mixed2[T, B](a: T, b: T) -> T:
    return a


def arg_magic_mixed1[T, B](
    a: T,
    b: T,
) -> T:
    return a


def arg_magic_mixed2[T, B](
    a: T,
    b: T,
) -> T:
    return a


def type_param_magic_mixed1[
    T,
    B,
](
    a: T, b: T
) -> T:
    return a


def type_param_magic_mixed2[
    T,
    B,
](
    a: T, b: T
) -> T:
    return a


def both_magic_mixed1[
    T,
    B,
](
    a: T,
    b: T,
) -> T:
    return a


def both_magic_mixed2[
    T,
    B,
](
    a: T,
    b: T,
) -> T:
    return a


def something_something_function[T: Model](
    param: list[int], other_param: type[T], *, some_other_param: bool = True
) -> QuerySet[T]:
    pass


def func[
    A_LOT_OF_GENERIC_TYPES: AreBeingDefinedHere,
    LIKE_THIS,
    AND_THIS,
    ANOTHER_ONE,
    AND_YET_ANOTHER_ONE: ThisOneHasTyping,
](
    a: T,
    b: T,
    c: T,
    d: T,
    e: T,
    f: T,
    g: T,
    h: T,
    i: T,
    j: T,
    k: T,
    l: T,
    m: T,
    n: T,
    o: T,
    p: T,
) -> T:
    return a


def with_random_comments[
    Z
    # bye
]():
    return a


def func[T, U, Z: int]():  # comment  # comment  # comment
    pass


def func[
    T,  # comment but it's long so it doesn't just move to the end of the line
    U,  # comment comment comm comm ent ent
    Z: int,  # comment ent ent comm comm comment
]():
    pass
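
The pattern running through this fixture, stated as a hedged rule of thumb: a trailing comma the user wrote themselves (the "magic trailing comma") forces the bracket pair containing it to stay exploded, and with PEP 695 generics that applies independently to the [...] type-parameter list and the (...) argument list. A tiny illustrative input (requires Python 3.12):

def f[T,](x: T) -> T:  # trailing comma after T: Black keeps [...] multiline
    return x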
@@ -1,4 +1,3 @@
# flags: --preview
m2 = None if not isinstance(dist, Normal) else m** 2 + s * 2
m3 = None if not isinstance(dist, Normal) else m ** 2 + s * 2
m4 = None if not isinstance(dist, Normal) else m**2 + s * 2
@@ -1,4 +1,3 @@
# flags: --preview
def func(
    arg1,
    arg2,
@@ -1,7 +1,6 @@
# flags: --preview
"""I am a very helpful module docstring.

    With trailing spaces (only removed with unify_docstring_detection on):
    With trailing spaces:
    Lorem ipsum dolor sit amet, consectetur adipiscing elit,
    sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
    Ut enim ad minim veniam,
@@ -39,7 +38,7 @@
# output
"""I am a very helpful module docstring.

    With trailing spaces (only removed with unify_docstring_detection on):
    With trailing spaces:
    Lorem ipsum dolor sit amet, consectetur adipiscing elit,
    sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
    Ut enim ad minim veniam,
@@ -62,5 +62,4 @@ class MultilineDocstringsAsWell:


class SingleQuotedDocstring:

    "I'm a docstring but I don't even get triple quotes."
@@ -1,4 +1,4 @@
# flags: --preview --minimum-version=3.10
# flags: --minimum-version=3.10
match match:
    case "test" if case != "not very loooooooooooooog condition":  # comment
        pass
tests/data/cases/pep646_typed_star_arg_type_var_tuple.py (new file, 8 lines)
@@ -0,0 +1,8 @@
# flags: --minimum-version=3.11


def fn(*args: *tuple[*A, B]) -> None:
    pass


fn.__annotations__
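
For reference, this fixture leans on PEP 646 (Python 3.11+), where `*tuple[*A, B]` types `*args` as any run of values matching A followed by a final B. A self-contained sketch with the assumed type variables spelled out (not part of the fixture, which deliberately leaves A and B undefined):

from typing import TypeVar, TypeVarTuple

A = TypeVarTuple("A")
B = TypeVar("B")

def fn(*args: *tuple[*A, B]) -> None:
    pass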
@@ -1,4 +1,3 @@
# flags: --minimum-version=3.8
def positional_only_arg(a, /):
    pass
@@ -1,4 +1,3 @@
# flags: --minimum-version=3.8
(a := 1)
(a := a)
if (match := pattern.search(data)) is None:
Some files were not shown because too many files have changed in this diff.