Compare commits
133 Commits
```text
7987951e24
e5e5dad792
24e4cb20ab
e7bf7b4619
71e380aedf
2630801f95
b0f36f5b42
314f8cf92b
d0ff3bd6cb
a41dc89f1f
950ec38c11
2c135edf37
6144c46c6a
dd278cb316
dbb14eac93
5342d2eeda
9f38928414
3e9dd25dad
bb802cf19a
5ae38dd370
45cbe572ee
fccd70cff1
00c0d6d91a
0580ecbef3
ed64d89faa
452d3b68f4
256f3420b1
00cb6d15c5
14e1de805a
5f23701708
9c129567e7
c02ca47daa
edaf085a18
b844c8a136
d82da0f0e9
8a737e727a
d330deea00
3d8129001f
459562c71a
99dbf3006b
c0b92f3888
e58baf15b9
1455ae4731
584d0331c8
6e9654065c
8dc912774e
40b73f2fb5
e157ba4de5
fdabd424e2
9431e98522
3b00112ac5
0aabac4fe0
ed33205579
6000d37f09
30759ca782
84ac1a947d
0db1173bbc
3fab5ade71
e54f86bae4
96ca1b6be3
17efac45f9
73f651f02f
f6c7c98f34
d670b0439c
56896264e4
efd9778873
c472557ba8
53a219056d
c98fc0c128
f54f34799b
484a669699
fff747d61b
9995bffbe4
7452902c77
32ebb93003
1b2427a2b7
a22b1ebbfd
b7d0e7212b
f1a2f92bba
8d9d18c033
bbfdba3a5e
8fb2add1f7
2a45cecf29
b4d6d8632d
ac018c16ca
058da5f81a
98a580bbdc
f50aba4984
6b27ef53e2
26aeebe9fb
9e13708be8
ac28187bf4
823a7b0ff0
699b45aef7
c20423249e
5ec91686ff
7e3e8f5bd9
b965c2a502
9ccf279a17
14b6e61970
b1c4dd96d7
4b4ae43e8b
7fa1faf83a
8827accf56
b0da11d370
721dff5493
7e2afc9bfd
1ad5263f2f
9ff047a957
5e571ccbbe
978bc505ac
b1f7b9f87d
b677a643c5
8447af4d8d
9c1fd463e1
b9c63230b4
4af12c499e
c827551b23
3be19b306f
f71925885c
ccfb0db4d5
c801cd60b1
f22b2437d5
75eb55764e
f2da85fe7f
0ab0b75717
0c033f3eb7
455de7703e
dbb956b0d3
3702ba224e
e4aaa8a994
ba88fc372e
5683242fd4
```
**`.git_archival.txt`**

```diff
@@ -1,4 +1,3 @@
 node: $Format:%H$
 node-date: $Format:%cI$
-describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
-ref-names: $Format:%D$
+describe-name: $Format:%(describe:tags=true,match=[0-9]*)$
```
**`.gitattributes`** (1 changed line)

```diff
@@ -1 +1,2 @@
 .git_archival.txt export-subst
+*.py diff=python
```
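The `.git_archival.txt` placeholders above are substituted by `git archive` because `.gitattributes` marks the file with `export-subst`. The tightened `match=[0-9]*` pattern means only tags that begin with a digit (Black's CalVer release tags) feed `describe-name`. A minimal sketch of the effect, assuming a git checkout that has digit-prefixed tags:

```python
import subprocess

# Sketch (assumes a git checkout with CalVer tags like "25.1.0"):
# "[0-9]*" matches only tags that *begin* with a digit, while the old
# "*[0-9]*" also matched tags merely *containing* a digit (e.g. "v2-beta").
result = subprocess.run(
    ["git", "describe", "--tags", "--match", "[0-9]*"],
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout.strip())  # e.g. "25.1.0-42-g7987951e2"
```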
**`.github/ISSUE_TEMPLATE/bug_report.md`** (4 changed lines)

```diff
@@ -12,7 +12,9 @@ current development version. To confirm this, you have three options:

 1. Update Black's version if a newer release exists: `pip install -U black`
 2. Use the online formatter at <https://black.vercel.app/?version=main>, which will use
-   the latest main branch.
+   the latest main branch. Note that the online formatter currently runs on
+   an older version of Python and may not support newer syntax, such as the
+   extended f-string syntax added in Python 3.12.
 3. Or run _Black_ on your machine:
    - create a new virtualenv (make sure it's the same Python version);
    - clone this repository;
```
**`.github/dependabot.yml`** (1 changed line)

```diff
@@ -14,4 +14,3 @@ updates:
     schedule:
       interval: "weekly"
     labels: ["skip news", "C: dependencies", "T: documentation"]
-    reviewers: ["ichard26"]
```
**`.github/workflows/diff_shades.yml`** (24 changed lines)

```diff
@@ -26,7 +26,7 @@ jobs:

       - name: Install diff-shades and support dependencies
         run: |
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip

       - name: Calculate run configuration & metadata
@@ -34,7 +34,8 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ github.token }}
         run: >
-          python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }}
+          python scripts/diff_shades_gha_helper.py config ${{ github.event_name }}
+          ${{ matrix.mode }}

   analysis:
     name: analysis / ${{ matrix.mode }}
@@ -44,7 +45,7 @@ jobs:
       HATCH_BUILD_HOOKS_ENABLE: "1"
       # Clang is less picky with the C code it's given than gcc (and may
       # generate faster binaries too).
-      CC: clang-14
+      CC: clang-18
     strategy:
       fail-fast: false
       matrix:
@@ -64,7 +65,7 @@ jobs:
       - name: Install diff-shades and support dependencies
         run: |
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
           # After checking out old revisions, this might not exist so we'll use a copy.
           cat scripts/diff_shades_gha_helper.py > helper.py
           git config user.name "diff-shades-gha"
@@ -110,19 +111,19 @@ jobs:
           ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}

       - name: Upload diff report
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.mode }}-diff.html
           path: diff.html

       - name: Upload baseline analysis
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.baseline-analysis }}
           path: ${{ matrix.baseline-analysis }}

       - name: Upload target analysis
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.target-analysis }}
           path: ${{ matrix.target-analysis }}
@@ -130,14 +131,13 @@ jobs:
       - name: Generate summary file (PR only)
         if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
         run: >
-          python helper.py comment-body
-          ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}
-          ${{ matrix.baseline-sha }} ${{ matrix.target-sha }}
-          ${{ github.event.pull_request.number }}
+          python helper.py comment-body ${{ matrix.baseline-analysis }}
+          ${{ matrix.target-analysis }} ${{ matrix.baseline-sha }}
+          ${{ matrix.target-sha }} ${{ github.event.pull_request.number }}

       - name: Upload summary file (PR only)
         if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: .pr-comment.json
           path: .pr-comment.json
```
**`.github/workflows/doc.yml`** (8 changed lines)

```diff
@@ -26,13 +26,15 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true

       - name: Install dependencies
         run: |
           python -m pip install uv
-          python -m uv pip install --system -e ".[d]"
-          python -m uv pip install --system -r "docs/requirements.txt"
+          python -m uv venv
+          python -m uv pip install -e ".[d]"
+          python -m uv pip install -r "docs/requirements.txt"

       - name: Build documentation
         run: sphinx-build -a -b html -W --keep-going docs/ docs/_build
```
**`.github/workflows/docker.yml`** (6 changed lines)

```diff
@@ -36,7 +36,7 @@ jobs:
           latest_non_release)" >> $GITHUB_ENV

       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -47,7 +47,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           !github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -58,7 +58,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
```
**`.github/workflows/fuzz.yml`** (3 changed lines)

```diff
@@ -22,7 +22,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13"]

     steps:
       - uses: actions/checkout@v4
@@ -31,6 +31,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true

       - name: Install dependencies
         run: |
```
**`.github/workflows/lint.yml`** (3 changed lines)

```diff
@@ -26,7 +26,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true

       - name: Install dependencies
         run: |
```
**`.github/workflows/pypi_upload.yml`** (17 changed lines)

```diff
@@ -23,7 +23,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true

       - name: Install latest pip, build, twine
         run: |
@@ -46,10 +47,11 @@ jobs:
       include: ${{ steps.set-matrix.outputs.include }}
     steps:
       - uses: actions/checkout@v4
+      # Keep cibuildwheel version in sync with below
      - name: Install cibuildwheel and pypyp
        run: |
-          pipx install cibuildwheel==2.15.0
-          pipx install pypyp==1
+          pipx install cibuildwheel==2.22.0
+          pipx install pypyp==1.3.0
       - name: generate matrix
         if: github.event_name != 'pull_request'
         run: |
@@ -73,7 +75,7 @@ jobs:
             | pyp 'json.dumps({"only": x, "os": "ubuntu-latest"})'
           } | pyp 'json.dumps(list(map(json.loads, lines)))' > /tmp/matrix
         env:
-          CIBW_BUILD: "cp38-* cp312-*"
+          CIBW_BUILD: "cp39-* cp313-*"
           CIBW_ARCHS_LINUX: x86_64
       - id: set-matrix
         run: echo "include=$(cat /tmp/matrix)" | tee -a $GITHUB_OUTPUT
@@ -89,14 +91,15 @@ jobs:

     steps:
       - uses: actions/checkout@v4
-      - uses: pypa/cibuildwheel@v2.17.0
+      # Keep cibuildwheel version in sync with above
+      - uses: pypa/cibuildwheel@v2.23.3
         with:
           only: ${{ matrix.only }}

       - name: Upload wheels as workflow artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ matrix.name }}-mypyc-wheels
+          name: ${{ matrix.only }}-mypyc-wheels
           path: ./wheelhouse/*.whl

       - if: github.event_name == 'release'
```
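For context, the `generate matrix` step above pipes `cibuildwheel --print-build-identifiers` through `pyp` to produce a JSON matrix. A rough Python equivalent of that shell pipeline (an illustration, not the workflow's actual code; the identifiers shown are assumed examples):

```python
import json

# One wheel-build job per cibuildwheel identifier; the real list comes from
# `cibuildwheel --print-build-identifiers` with CIBW_BUILD="cp39-* cp313-*".
identifiers = ["cp39-manylinux_x86_64", "cp313-manylinux_x86_64"]
matrix = [{"only": ident, "os": "ubuntu-latest"} for ident in identifiers]
print(json.dumps(matrix))  # becomes the `include` output of set-matrix
```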
**`.github/workflows/release_tests.yml`** (2 changed lines)

```diff
@@ -25,7 +25,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: ["3.12"]
+        python-version: ["3.13"]
         os: [macOS-latest, ubuntu-latest, windows-latest]

     steps:
```
**`.github/workflows/test.yml`** (9 changed lines)

```diff
@@ -31,7 +31,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "pypy-3.9"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13", "pypy-3.9"]
         os: [ubuntu-latest, macOS-latest, windows-latest]

     steps:
@@ -41,6 +41,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true

       - name: Install tox
         run: |
@@ -62,7 +63,7 @@ jobs:
         if:
           github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' &&
           !startsWith(matrix.python-version, 'pypy')
-        uses: AndreMiras/coveralls-python-action@8799c9f4443ac4201d2e2f2c725d577174683b99
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           parallel: true
@@ -77,7 +78,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Send finished signal to Coveralls
-        uses: AndreMiras/coveralls-python-action@8799c9f4443ac4201d2e2f2c725d577174683b99
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
         with:
           parallel-finished: true
           debug: true
@@ -98,7 +99,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"

       - name: Install black with uvloop
         run: |
```
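The pin to `3.12.4` (rather than plain `3.12`) in the matrices above lines up with the 24.10.0 changelog entry further down: Black errors out on Python 3.12.5 because of an upstream memory-safety issue that can break its AST safety checks. An illustrative guard of that shape (a sketch, not Black's exact code):

```python
import sys

# Illustrative guard: refuse to run on the one affected micro release,
# 3.12.5, and point users at a safe version.
if sys.version_info[:3] == (3, 12, 5):
    raise SystemExit(
        "Python 3.12.5 has a memory-safety issue affecting AST checks; "
        "use 3.12.4 or 3.12.6+ instead."
    )
```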
**`.github/workflows/upload_binary.yml`** (6 changed lines)

```diff
@@ -13,13 +13,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [windows-2019, ubuntu-20.04, macos-latest]
+        os: [windows-2019, ubuntu-22.04, macos-latest]
         include:
           - os: windows-2019
             pathsep: ";"
             asset_name: black_windows.exe
             executable_mime: "application/vnd.microsoft.portable-executable"
-          - os: ubuntu-20.04
+          - os: ubuntu-22.04
             pathsep: ":"
             asset_name: black_linux
             executable_mime: "application/x-executable"
@@ -34,7 +34,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"

       - name: Install Black and PyInstaller
         run: |
```
**`.pre-commit-config.yaml`**

```diff
@@ -24,12 +24,12 @@ repos:
       additional_dependencies: *version_check_dependencies

   - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
+    rev: 6.0.1
     hooks:
       - id: isort

   - repo: https://github.com/pycqa/flake8
-    rev: 7.0.0
+    rev: 7.2.0
     hooks:
       - id: flake8
         additional_dependencies:
@@ -39,17 +39,21 @@ repos:
         exclude: ^src/blib2to3/

   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.9.0
+    rev: v1.15.0
     hooks:
       - id: mypy
         exclude: ^(docs/conf.py|scripts/generate_schema.py)$
         args: []
         additional_dependencies: &mypy_deps
           - types-PyYAML
+          - types-atheris
           - tomli >= 0.2.6, < 2.0.0
-          - click >= 8.1.0, != 8.1.4, != 8.1.5
+          - click >= 8.2.0
+          # Click is intentionally out-of-sync with pyproject.toml
+          # v8.2 has breaking changes. We work around them at runtime, but we need the newer stubs.
           - packaging >= 22.0
           - platformdirs >= 2.1.0
+          - pytokens >= 0.1.10
           - pytest
           - hypothesis
           - aiohttp >= 3.7.4
@@ -62,14 +66,15 @@ repos:
         args: ["--python-version=3.10"]
         additional_dependencies: *mypy_deps

-  - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v4.0.0-alpha.8
+  - repo: https://github.com/rbubley/mirrors-prettier
+    rev: v3.5.3
     hooks:
       - id: prettier
+        types_or: [markdown, yaml, json]
         exclude: \.github/workflows/diff_shades\.yml

   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v5.0.0
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
```
**`.readthedocs.yaml`**

```diff
@@ -16,3 +16,6 @@ python:
       path: .
       extra_requirements:
         - d
+
+sphinx:
+  configuration: docs/conf.py
```
**`AUTHORS.md`**

```diff
@@ -181,6 +181,7 @@ Multiple contributions by:
 - [Tony Narlock](mailto:tony@git-pull.com)
 - [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com)
 - [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu)
+- [Tushar Sadhwani](mailto:tushar.sadhwani000@gmail.com)
 - [Tzu-ping Chung](mailto:uranusjr@gmail.com)
 - [Utsav Shah](mailto:ukshah2@illinois.edu)
 - utsav-dbx
```
**`CHANGES.md`** (202 changed lines)

```diff
@@ -1,5 +1,207 @@
 # Change Log
+
+## Unreleased
+
+### Highlights
+
+<!-- Include any especially major or disruptive changes here -->
+
+### Stable style
+
+<!-- Changes that affect Black's stable style -->
+
+- Fix crash while formatting a long `del` statement containing tuples (#4628)
+- Fix crash while formatting expressions using the walrus operator in complex `with`
+  statements (#4630)
+- Handle `# fmt: skip` followed by a comment at the end of file (#4635)
+- Fix crash when a tuple appears in the `as` clause of a `with` statement (#4634)
+- Fix crash when tuple is used as a context manager inside a `with` statement (#4646)
+- Fix crash on a `\\r\n` (#4673)
+- Fix crash on `await ...` (where `...` is a literal `Ellipsis`) (#4676)
+- Remove support for pre-python 3.7 `await/async` as soft keywords/variable names
+  (#4676)
+
+### Preview style
+
+<!-- Changes that affect Black's preview style -->
+
+- Fix a bug where one-liner functions/conditionals marked with `# fmt: skip` would still
+  be formatted (#4552)
+
+### Configuration
+
+<!-- Changes to how Black can be configured -->
+
+### Packaging
+
+<!-- Changes to how Black is packaged, such as dependency requirements -->
+
+### Parser
+
+<!-- Changes to the parser or to version autodetection -->
+
+- Rewrite tokenizer to improve performance and compliance (#4536)
+- Fix bug where certain unusual expressions (e.g., lambdas) were not accepted in type
+  parameter bounds and defaults. (#4602)
+
+### Performance
+
+<!-- Changes that improve Black's performance. -->
+
+### Output
+
+<!-- Changes to Black's terminal output and error messages -->
+
+### _Blackd_
+
+<!-- Changes to blackd -->
+
+### Integrations
+
+<!-- For example, Docker, GitHub Actions, pre-commit, editors -->
+
+- Fix the version check in the vim file to reject Python 3.8 (#4567)
+- Enhance GitHub Action `psf/black` to read Black version from an additional section in
+  pyproject.toml: `[project.dependency-groups]` (#4606)
+
+### Documentation
+
+<!-- Major changes to documentation and policies. Small docs changes
+don't need a changelog entry. -->
+
+## 25.1.0
+
+### Highlights
+
+This release introduces the new 2025 stable style (#4558), stabilizing the following
+changes:
+
+- Normalize casing of Unicode escape characters in strings to lowercase (#2916)
+- Fix inconsistencies in whether certain strings are detected as docstrings (#4095)
+- Consistently add trailing commas to typed function parameters (#4164)
+- Remove redundant parentheses in if guards for case blocks (#4214)
+- Add parentheses to if clauses in case blocks when the line is too long (#4269)
+- Whitespace before `# fmt: skip` comments is no longer normalized (#4146)
+- Fix line length computation for certain expressions that involve the power operator
+  (#4154)
+- Check if there is a newline before the terminating quotes of a docstring (#4185)
+- Fix type annotation spacing between `*` and more complex type variable tuple (#4440)
+
+The following changes were not in any previous release:
+
+- Remove parentheses around sole list items (#4312)
+- Generic function definitions are now formatted more elegantly: parameters are split
+  over multiple lines first instead of type parameter definitions (#4553)
+
+### Stable style
+
+- Fix formatting cells in IPython notebooks with magic methods and starting or trailing
+  empty lines (#4484)
+- Fix crash when formatting `with` statements containing tuple generators/unpacking
+  (#4538)
+
+### Preview style
+
+- Fix/remove string merging changing f-string quotes on f-strings with internal quotes
+  (#4498)
+- Collapse multiple empty lines after an import into one (#4489)
+- Prevent `string_processing` and `wrap_long_dict_values_in_parens` from removing
+  parentheses around long dictionary values (#4377)
+- Move `wrap_long_dict_values_in_parens` from the unstable to preview style (#4561)
+
+### Packaging
+
+- Store license identifier inside the `License-Expression` metadata field, see
+  [PEP 639](https://peps.python.org/pep-0639/). (#4479)
+
+### Performance
+
+- Speed up the `is_fstring_start` function in Black's tokenizer (#4541)
+
+### Integrations
+
+- If using stdin with `--stdin-filename` set to a force excluded path, stdin won't be
+  formatted. (#4539)
+
+## 24.10.0
+
+### Highlights
+
+- Black is now officially tested with Python 3.13 and provides Python 3.13
+  mypyc-compiled wheels. (#4436) (#4449)
+- Black will issue an error when used with Python 3.12.5, due to an upstream memory
+  safety issue in Python 3.12.5 that can cause Black's AST safety checks to fail. Please
+  use Python 3.12.6 or Python 3.12.4 instead. (#4447)
+- Black no longer supports running with Python 3.8 (#4452)
+
+### Stable style
+
+- Fix crashes involving comments in parenthesised return types or `X | Y` style unions.
+  (#4453)
+- Fix skipping Jupyter cells with unknown `%%` magic (#4462)
+
+### Preview style
+
+- Fix type annotation spacing between * and more complex type variable tuple (i.e. `def
+  fn(*args: *tuple[*Ts, T]) -> None: pass`) (#4440)
+
+### Caching
+
+- Fix bug where the cache was shared between runs with and without `--unstable` (#4466)
+
+### Packaging
+
+- Upgrade version of mypyc used to 1.12 beta (#4450) (#4449)
+- `blackd` now requires a newer version of aiohttp. (#4451)
+
+### Output
+
+- Added Python target version information on parse error (#4378)
+- Add information about Black version to internal error messages (#4457)
+
+## 24.8.0
+
+### Stable style
+
+- Fix crash when `# fmt: off` is used before a closing parenthesis or bracket. (#4363)
+
+### Packaging
+
+- Packaging metadata updated: docs are explicitly linked, the issue tracker is now also
+  linked. This improves the PyPI listing for Black. (#4345)
+
+### Parser
+
+- Fix regression where Black failed to parse a multiline f-string containing another
+  multiline string (#4339)
+- Fix regression where Black failed to parse an escaped single quote inside an f-string
+  (#4401)
+- Fix bug with Black incorrectly parsing empty lines with a backslash (#4343)
+- Fix bugs with Black's tokenizer not handling `\{` inside f-strings very well (#4422)
+- Fix incorrect line numbers in the tokenizer for certain tokens within f-strings
+  (#4423)
+
+### Performance
+
+- Improve performance when a large directory is listed in `.gitignore` (#4415)
+
+### _Blackd_
+
+- Fix blackd (and all extras installs) for docker container (#4357)
+
+## 24.4.2
+
+This is a bugfix release to fix two regressions in the new f-string parser introduced in
+24.4.1.
+
+### Parser
+
+- Fix regression where certain complex f-strings failed to parse (#4332)
+
+### Performance
+
+- Fix bad performance on certain complex string literals (#4331)
+
 ## 24.4.1

 ### Highlights
```
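The crash fixes listed under "Stable style" above refer to valid but unusual constructs. A few minimal reproductions of the inputs involved (illustrative snippets, not Black's own code):

```python
import contextlib

# These are formatter inputs that previously crashed Black, not meaningful
# programs in their own right.

# Tuple in the `as` clause of a `with` statement (#4634):
with contextlib.nullcontext((1, 2)) as (a, b):
    pass

# Walrus operator inside a `with` statement (#4630):
with (ctx := contextlib.nullcontext()):
    pass

# `await ...` where `...` is a literal Ellipsis (#4676); this parses fine,
# though awaiting Ellipsis would fail at runtime if `f()` were ever awaited.
async def f():
    await ...
```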
**`CONTRIBUTING.md`**

```diff
@@ -1,10 +1,13 @@
 # Contributing to _Black_

-Welcome! Happy to see you willing to make the project better. Have you read the entire
-[user documentation](https://black.readthedocs.io/en/latest/) yet?
+Welcome future contributor! We're happy to see you willing to make the project better.

-Our [contributing documentation](https://black.readthedocs.org/en/latest/contributing/)
-contains details on all you need to know about contributing to _Black_, the basics to
-the internals of _Black_.
+If you aren't familiar with _Black_, or are looking for documentation on something
+specific, the [user documentation](https://black.readthedocs.io/en/latest/) is the best
+place to look.

-We look forward to your contributions!
+For getting started on contributing, please read the
+[contributing documentation](https://black.readthedocs.org/en/latest/contributing/) for
+all you need to know.
+
+Thank you, and we look forward to your contributions!
```
**`Dockerfile`**

```diff
@@ -10,7 +10,8 @@ RUN python -m venv $VIRTUAL_ENV
 RUN python -m pip install --no-cache-dir hatch hatch-fancy-pypi-readme hatch-vcs
 RUN . /opt/venv/bin/activate && pip install --no-cache-dir --upgrade pip setuptools \
     && cd /src && hatch build -t wheel \
-    && pip install --no-cache-dir dist/*-cp*[colorama,d,uvloop]
+    && pip install --no-cache-dir dist/*-cp* \
+    && pip install black[colorama,d,uvloop]

 FROM python:3.12-slim
```
**`README.md`**

```diff
@@ -38,7 +38,7 @@ Try it out now using the [Black Playground](https://black.vercel.app). Watch the

 ### Installation

-_Black_ can be installed by running `pip install black`. It requires Python 3.8+ to run.
+_Black_ can be installed by running `pip install black`. It requires Python 3.9+ to run.
 If you want to format Jupyter Notebooks, install with `pip install "black[jupyter]"`.

 If you can't wait for the latest _hotness_ and want to install from GitHub, use:
@@ -137,8 +137,8 @@ SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtuale
 pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant,
 Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more.

-The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Lyft, Mozilla,
-Quora, Duolingo, QuantumBlack, Tesla, Archer Aviation.
+The following organizations use _Black_: Dropbox, KeepTruckin, Lyft, Mozilla, Quora,
+Duolingo, QuantumBlack, Tesla, Archer Aviation.

 Are we missing anyone? Let us know.
```
**`action/main.py`**

```diff
@@ -71,6 +71,7 @@ def read_version_specifier_from_pyproject() -> str:
         return f"=={version}"

     arrays = [
+        *pyproject.get("dependency-groups", {}).values(),
         pyproject.get("project", {}).get("dependencies"),
         *pyproject.get("project", {}).get("optional-dependencies", {}).values(),
     ]
```
**`autoload/black.vim`**

```diff
@@ -75,8 +75,8 @@ def _initialize_black_env(upgrade=False):
         return True

     pyver = sys.version_info[:3]
-    if pyver < (3, 8):
-        print("Sorry, Black requires Python 3.8+ to run.")
+    if pyver < (3, 9):
+        print("Sorry, Black requires Python 3.9+ to run.")
         return False

     from pathlib import Path
```
**`docs/conf.py`** (27 changed lines)

```diff
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Configuration file for the Sphinx documentation builder.
 #
@@ -14,22 +13,46 @@
 #

 import os
+import re
 import string
 from importlib.metadata import version
 from pathlib import Path

+from sphinx.application import Sphinx
+
 CURRENT_DIR = Path(__file__).parent


 def make_pypi_svg(version: str) -> None:
     template: Path = CURRENT_DIR / "_static" / "pypi_template.svg"
     target: Path = CURRENT_DIR / "_static" / "pypi.svg"
-    with open(str(template), "r", encoding="utf8") as f:
+    with open(str(template), encoding="utf8") as f:
         svg: str = string.Template(f.read()).substitute(version=version)
     with open(str(target), "w", encoding="utf8") as f:
         f.write(svg)


+def replace_pr_numbers_with_links(content: str) -> str:
+    """Replaces all PR numbers with the corresponding GitHub link."""
+    return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)
+
+
+def handle_include_read(
+    app: Sphinx,
+    relative_path: Path,
+    parent_docname: str,
+    content: list[str],
+) -> None:
+    """Handler for the include-read sphinx event."""
+    if parent_docname == "change_log":
+        content[0] = replace_pr_numbers_with_links(content[0])
+
+
+def setup(app: Sphinx) -> None:
+    """Sets up a minimal sphinx extension."""
+    app.connect("include-read", handle_include_read)
+
+
 # Necessary so Click doesn't hit an encode error when called by
 # sphinxcontrib-programoutput on Windows.
 os.putenv("pythonioencoding", "utf-8")
```
**`docs/contributing/the_basics.md`**

````diff
@@ -7,7 +7,14 @@ An overview on contributing to the _Black_ project.
 Development on the latest version of Python is preferred. You can use any operating
 system.

-Install development dependencies inside a virtual environment of your choice, for
+First clone the _Black_ repository:
+
+```console
+$ git clone https://github.com/psf/black.git
+$ cd black
+```
+
+Then install development dependencies inside a virtual environment of your choice, for
 example:

 ```console
@@ -16,7 +23,7 @@ $ source .venv/bin/activate # activation for linux and mac
 $ .venv\Scripts\activate # activation for windows

 (.venv)$ pip install -r test_requirements.txt
-(.venv)$ pip install -e .[d]
+(.venv)$ pip install -e ".[d]"
 (.venv)$ pre-commit install
 ```

@@ -48,13 +55,16 @@ Further examples of invoking the tests
 # Run tests on a specific python version
 (.venv)$ tox -e py39

-# pass arguments to pytest
+# Run an individual test
+(.venv)$ pytest -k <test name>
+
+# Pass arguments to pytest
 (.venv)$ tox -e py -- --no-cov

-# print full tree diff, see documentation below
+# Print full tree diff, see documentation below
 (.venv)$ tox -e py -- --print-full-tree

-# disable diff printing, see documentation below
+# Disable diff printing, see documentation below
 (.venv)$ tox -e py -- --print-tree-diff=False
 ```

@@ -99,16 +109,22 @@ default. To turn it off pass `--print-tree-diff=False`.
 `Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If
 you feel this PR does not require a changelog entry please state that in a comment and a
 maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure you
-have a line in the following format:
+have a line in the following format added below the appropriate header:

 ```md
 - `Black` is now more awesome (#X)
 ```

+<!---
+The Next PR Number link uses HTML because of a bug in MyST-Parser that double-escapes the ampersand, causing the query parameters to not be processed.
+MyST-Parser issue: https://github.com/executablebooks/MyST-Parser/issues/760
+MyST-Parser stalled fix PR: https://github.com/executablebooks/MyST-Parser/pull/929
+-->
+
 Note that X should be your PR number, not issue number! To workout X, please use
-[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This
-is not perfect but saves a lot of release overhead as now the releaser does not need to
-go back and workout what to add to the `CHANGES.md` for each release.
+<a href="https://ichard26.github.io/next-pr-number/?owner=psf&name=black">Next PR
+Number</a>. This is not perfect but saves a lot of release overhead as now the releaser
+does not need to go back and workout what to add to the `CHANGES.md` for each release.

 ### Style Changes

@@ -116,7 +132,7 @@ If a change would affect the advertised code style, please modify the documentat
 _Black_ code style) to reflect that change. Patches that fix unintended bugs in
 formatting don't need to be mentioned separately though. If the change is implemented
 with the `--preview` flag, please include the change in the future style document
-instead and write the changelog entry under a dedicated "Preview changes" heading.
+instead and write the changelog entry under the dedicated "Preview style" heading.

 ### Docs Testing

@@ -124,17 +140,17 @@ If you make changes to docs, you can test they still build locally too.

 ```console
 (.venv)$ pip install -r docs/requirements.txt
-(.venv)$ pip install -e .[d]
+(.venv)$ pip install -e ".[d]"
 (.venv)$ sphinx-build -a -b html -W docs/ docs/_build/
 ```

 ## Hygiene

 If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug,
-run it again to confirm it's really fixed.
+and run the test again to confirm it's really fixed.

-If adding a new feature, add a test. In fact, always add a test. But wait, before adding
-any large feature, first open an issue for us to discuss the idea first.
+If adding a new feature, add a test. In fact, always add a test. If adding a large
+feature, please first open an issue to discuss it beforehand.

 ## Finally
````
**`docs/faq.md`** (13 changed lines)

```diff
@@ -84,16 +84,19 @@ See [Using _Black_ with other tools](labels/why-pycodestyle-warnings).

 ## Which Python versions does Black support?

-Currently the runtime requires Python 3.8-3.11. Formatting is supported for files
-containing syntax from Python 3.3 to 3.11. We promise to support at least all Python
-versions that have not reached their end of life. This is the case for both running
-_Black_ and formatting code.
+_Black_ generally supports all Python versions supported by CPython (see
+[the Python devguide](https://devguide.python.org/versions/) for current information).
+We promise to support at least all Python versions that have not reached their end of
+life. This is the case for both running _Black_ and formatting code.

 Support for formatting Python 2 code was removed in version 22.0. While we've made no
 plans to stop supporting older Python 3 minor versions immediately, their support might
 also be removed some time in the future without a deprecation period.

-Runtime support for 3.7 was removed in version 23.7.0.
+`await`/`async` as soft keywords/identifiers are no longer supported as of 25.2.0.
+
+Runtime support for 3.6 was removed in version 22.10.0, for 3.7 in version 23.7.0, and
+for 3.8 in version 24.10.0.

 ## Why does my linter or typechecker complain after I format my code?
```
**`docs/getting_started.md`**

```diff
@@ -16,7 +16,7 @@ Also, you can try out _Black_ online for minimal fuss on the

 ## Installation

-_Black_ can be installed by running `pip install black`. It requires Python 3.8+ to run.
+_Black_ can be installed by running `pip install black`. It requires Python 3.9+ to run.
 If you want to format Jupyter Notebooks, install with `pip install "black[jupyter]"`.

 If you use pipx, you can install Black with `pipx install black`.
```
**`docs/integrations/editors.md`**

```diff
@@ -236,7 +236,7 @@ Configuration:

 #### Installation

-This plugin **requires Vim 7.0+ built with Python 3.8+ support**. It needs Python 3.8 to
+This plugin **requires Vim 7.0+ built with Python 3.9+ support**. It needs Python 3.9 to
 be able to run _Black_ inside the Vim process which is much faster than calling an
 external command.
```
**`docs/integrations/github_actions.md`**

````diff
@@ -24,7 +24,7 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: psf/black@stable
 ```

@@ -37,10 +37,10 @@ the `pyproject.toml` file. `version` can be any
 [valid version specifier](https://packaging.python.org/en/latest/glossary/#term-Version-Specifier)
 or just the version number if you want an exact version. To read the version from the
 `pyproject.toml` file instead, set `use_pyproject` to `true`. This will first look into
-the `tool.black.required-version` field, then the `project.dependencies` array and
-finally the `project.optional-dependencies` table. The action defaults to the latest
-release available on PyPI. Only versions available from PyPI are supported, so no commit
-SHAs or branch names.
+the `tool.black.required-version` field, then the `dependency-groups` table, then the
+`project.dependencies` array and finally the `project.optional-dependencies` table. The
+action defaults to the latest release available on PyPI. Only versions available from
+PyPI are supported, so no commit SHAs or branch names.

 If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter`
 extra. Installing the extra and including Jupyter Notebook files can be configured via
@@ -74,9 +74,14 @@ If you want to match versions covered by Black's
 version: "~= 22.0"
 ```

-If you want to read the version from `pyproject.toml`, set `use_pyproject` to `true`:
+If you want to read the version from `pyproject.toml`, set `use_pyproject` to `true`.
+Note that this requires Python >= 3.11, so using the setup-python action may be
+required, for example:

 ```yaml
+- uses: actions/setup-python@v5
+  with:
+    python-version: "3.13"
 - uses: psf/black@stable
   with:
     options: "--check --verbose"
````
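The lookup order described above (`tool.black.required-version`, then `[dependency-groups]`, then `project.dependencies`, then `project.optional-dependencies`) can be approximated as follows. This is a minimal sketch assuming Python 3.11+ for `tomllib`, not the action's actual implementation:

```python
import tomllib  # standard library since Python 3.11

def find_black_requirement(path: str = "pyproject.toml") -> str | None:
    """Return the first requirement string that names black, if any."""
    with open(path, "rb") as f:
        data = tomllib.load(f)
    # 1. Explicit pin wins.
    required = data.get("tool", {}).get("black", {}).get("required-version")
    if required:
        return f"black=={required}"
    # 2.-4. Scan dependency arrays in the documented order.
    arrays = [
        *data.get("dependency-groups", {}).values(),
        data.get("project", {}).get("dependencies", []),
        *data.get("project", {}).get("optional-dependencies", {}).values(),
    ]
    for array in arrays:
        for req in array or []:
            # Dependency groups may also contain include tables; skip those.
            # The prefix match here is deliberately naive.
            if isinstance(req, str) and req.replace(" ", "").startswith("black"):
                return req
    return None

print(find_black_requirement())
```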
**`docs/integrations/source_version_control.md`**

```diff
@@ -8,7 +8,7 @@ Use [pre-commit](https://pre-commit.com/). Once you
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.1
+    rev: 25.1.0
     hooks:
       - id: black
         # It is recommended to specify the latest version of Python
@@ -35,7 +35,7 @@ include Jupyter Notebooks. To use this hook, simply replace the hook's `id: blac
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.1
+    rev: 25.1.0
     hooks:
       - id: black-jupyter
         # It is recommended to specify the latest version of Python
```
**`docs/requirements.txt`**

```diff
@@ -1,9 +1,9 @@
 # Used by ReadTheDocs; pinned requirements for stability.

-myst-parser==2.0.0
-Sphinx==7.3.7
+myst-parser==4.0.1
+Sphinx==8.2.3
 # Older versions break Sphinx even though they're declared to be supported.
-docutils==0.20.1
-sphinxcontrib-programoutput==0.17
+docutils==0.21.2
+sphinxcontrib-programoutput==0.18
 sphinx_copybutton==0.5.2
-furo==2024.1.29
+furo==2024.8.6
```
**`docs/the_black_code_style/current_style.md`**

```diff
@@ -250,6 +250,11 @@ exception of [capital "R" prefixes](#rstrings-and-rstrings), unicode literal mar
 (`u`) are removed because they are meaningless in Python 3, and in the case of multiple
 characters "r" is put first as in spoken language: "raw f-string".

+Another area where Python allows multiple ways to format a string is escape sequences.
+For example, `"\uabcd"` and `"\uABCD"` evaluate to the same string. _Black_ normalizes
+such escape sequences to lowercase, but uses uppercase for `\N` named character escapes,
+such as `"\N{MEETEI MAYEK LETTER HUK}"`.
+
 The main reason to standardize on a single form of quotes is aesthetics. Having one kind
 of quotes everywhere reduces reader distraction. It will also enable a future version of
 _Black_ to merge consecutive string literals that ended up on the same line (see
```
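A small illustration of the escape-sequence rule added above; both spellings denote the same string, and lowercasing `\N` is not an option because `\n` means newline:

```python
# Hex digits in escape sequences are case-insensitive to Python itself.
assert "\uABCD" == "\uabcd"

s = "\uabcd"  # Black writes hex escapes in lowercase
t = "\N{BULLET}"  # \N named escapes keep their uppercase N
```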
@ -2,6 +2,8 @@
|
|||||||
|
|
||||||
## Preview style
|
## Preview style
|
||||||
|
|
||||||
|
(labels/preview-style)=
|
||||||
|
|
||||||
Experimental, potentially disruptive style changes are gathered under the `--preview`
|
Experimental, potentially disruptive style changes are gathered under the `--preview`
|
||||||
CLI flag. At the end of each year, these changes may be adopted into the default style,
|
CLI flag. At the end of each year, these changes may be adopted into the default style,
|
||||||
as described in [The Black Code Style](index.md). Because the functionality is
|
as described in [The Black Code Style](index.md). Because the functionality is
|
||||||
@@ -20,22 +22,13 @@ demoted from the `--preview` to the `--unstable` style, users can use the

 Currently, the following features are included in the preview style:

-- `hex_codes_in_unicode_sequences`: normalize casing of Unicode escape characters in
-  strings
-- `unify_docstring_detection`: fix inconsistencies in whether certain strings are
-  detected as docstrings
-- `no_normalize_fmt_skip_whitespace`: whitespace before `# fmt: skip` comments is no
-  longer normalized
-- `typed_params_trailing_comma`: consistently add trailing commas to typed function
-  parameters
-- `is_simple_lookup_for_doublestar_expression`: fix line length computation for certain
-  expressions that involve the power operator
-- `docstring_check_for_newline`: checks if there is a newline before the terminating
-  quotes of a docstring
-- `remove_redundant_guard_parens`: Removes redundant parentheses in `if` guards for
-  `case` blocks.
-- `parens_for_long_if_clauses_in_case_block`: Adds parentheses to `if` clauses in `case`
-  blocks when the line is too long
+- `always_one_newline_after_import`: Always force one blank line after import
+  statements, except when the line after the import is a comment or an import statement
+- `wrap_long_dict_values_in_parens`: Add parentheses around long values in dictionaries
+  ([see below](labels/wrap-long-dict-values))
+- `fix_fmt_skip_in_one_liners`: Fix `# fmt: skip` behaviour on one-liner declarations,
+  such as `def foo(): return "mock"  # fmt: skip`, where previously the declaration
+  would have been incorrectly collapsed.

 (labels/unstable-features)=

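A hypothetical before/after sketch (editor's illustration) of the `always_one_newline_after_import` behaviour listed above:

```python
# Before: code immediately follows the imports.
import os
print(os.getcwd())

# After formatting with --preview: exactly one blank line is forced
# after the import block.
import os

print(os.getcwd())
```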
@@ -43,13 +36,38 @@ The unstable style additionally includes the following features:

 - `string_processing`: split long string literals and related changes
   ([see below](labels/string-processing))
-- `wrap_long_dict_values_in_parens`: add parentheses to long values in dictionaries
-  ([see below](labels/wrap-long-dict-values))
 - `multiline_string_handling`: more compact formatting of expressions involving
   multiline strings ([see below](labels/multiline-string-handling))
 - `hug_parens_with_braces_and_square_brackets`: more compact formatting of nested
   brackets ([see below](labels/hug-parens))

+(labels/wrap-long-dict-values)=
+
+### Improved parentheses management in dicts
+
+For dict literals with long values, they are now wrapped in parentheses. Unnecessary
+parentheses are now removed. For example:
+
+```python
+my_dict = {
+    "a key in my dict": a_very_long_variable
+    * and_a_very_long_function_call()
+    / 100000.0,
+    "another key": (short_value),
+}
+```
+
+will be changed to:
+
+```python
+my_dict = {
+    "a key in my dict": (
+        a_very_long_variable * and_a_very_long_function_call() / 100000.0
+    ),
+    "another key": short_value,
+}
+```
+
 (labels/hug-parens)=

 ### Improved multiline dictionary and list indentation for sole function parameter
@@ -130,37 +148,11 @@ foo(

 _Black_ will split long string literals and merge short ones. Parentheses are used where
 appropriate. When split, parts of f-strings that don't need formatting are converted to
-plain strings. User-made splits are respected when they do not exceed the line length
-limit. Line continuation backslashes are converted into parenthesized strings.
-Unnecessary parentheses are stripped. The stability and status of this feature is
-tracked in [this issue](https://github.com/psf/black/issues/2188).
-
-(labels/wrap-long-dict-values)=
-
-### Improved parentheses management in dicts
-
-For dict literals with long values, they are now wrapped in parentheses. Unnecessary
-parentheses are now removed. For example:
-
-```python
-my_dict = {
-    "a key in my dict": a_very_long_variable
-    * and_a_very_long_function_call()
-    / 100000.0,
-    "another key": (short_value),
-}
-```
-
-will be changed to:
-
-```python
-my_dict = {
-    "a key in my dict": (
-        a_very_long_variable * and_a_very_long_function_call() / 100000.0
-    ),
-    "another key": short_value,
-}
-```
+plain strings. f-strings will not be merged if they contain internal quotes and it would
+change their quotation mark style. User-made splits are respected when they do not
+exceed the line length limit. Line continuation backslashes are converted into
+parenthesized strings. Unnecessary parentheses are stripped. The stability and status of
+this feature is tracked in [this issue](https://github.com/psf/black/issues/2188).

 (labels/multiline-string-handling)=

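To make the splitting behaviour concrete, here is an editor's sketch (the exact split points are illustrative, not normative):

```python
# Before: one literal that overflows the line length limit.
msg = "This diagnostic message is far too long to fit within the configured line length limit of the project."

# After formatting with `string_processing` enabled: the literal is split into
# implicitly concatenated parts wrapped in parentheses.
msg = (
    "This diagnostic message is far too long to fit within the configured line"
    " length limit of the project."
)
```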
@@ -275,52 +267,3 @@ s = (  # Top comment
 # Bottom comment
 )
 ```

-## Potential future changes
-
-This section lists changes that we may want to make in the future, but that aren't
-implemented yet.
-
-### Using backslashes for with statements
-
-[Backslashes are bad and should be never be used](labels/why-no-backslashes) however
-there is one exception: `with` statements using multiple context managers. Before Python
-3.9 Python's grammar does not allow organizing parentheses around the series of context
-managers.
-
-We don't want formatting like:
-
-```py3
-with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4:
-    ...  # nothing to split on - line too long
-```
-
-So _Black_ will, when we implement this, format it like this:
-
-```py3
-with \
-     make_context_manager1() as cm1, \
-     make_context_manager2() as cm2, \
-     make_context_manager3() as cm3, \
-     make_context_manager4() as cm4 \
-:
-    ...  # backslashes and an ugly stranded colon
-```
-
-Although when the target version is Python 3.9 or higher, _Black_ uses parentheses
-instead in `--preview` mode (see below) since they're allowed in Python 3.9 and higher.
-
-An alternative to consider if the backslashes in the above formatting are undesirable is
-to use {external:py:obj}`contextlib.ExitStack` to combine context managers in the
-following way:
-
-```python
-with contextlib.ExitStack() as exit_stack:
-    cm1 = exit_stack.enter_context(make_context_manager1())
-    cm2 = exit_stack.enter_context(make_context_manager2())
-    cm3 = exit_stack.enter_context(make_context_manager3())
-    cm4 = exit_stack.enter_context(make_context_manager4())
-    ...
-```
-
-(labels/preview-style)=
@@ -8,16 +8,16 @@ _Black_ images with the following tags are available:
 - release numbers, e.g. `21.5b2`, `21.6b0`, `21.7b0` etc.\
   ℹ Recommended for users who want to use a particular version of _Black_.
 - `latest_release` - tag created when a new version of _Black_ is released.\
-  ℹ Recommended for users who want to use released versions of _Black_. It maps to [the latest release](https://github.com/psf/black/releases/latest)
-  of _Black_.
+  ℹ Recommended for users who want to use released versions of _Black_. It maps to
+  [the latest release](https://github.com/psf/black/releases/latest) of _Black_.
 - `latest_prerelease` - tag created when a new alpha (prerelease) version of _Black_ is
   released.\
-  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note that
-  the most recent release may be newer than any prerelease, because no prereleases are created
-  before most releases.
+  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note
+  that the most recent release may be newer than any prerelease, because no prereleases
+  are created before most releases.
 - `latest` - tag used for the newest image of _Black_.\
-  ℹ Recommended for users who always want to use the latest version of _Black_, even before
-  it is released.
+  ℹ Recommended for users who always want to use the latest version of _Black_, even
+  before it is released.

 There is one more tag used for _Black_ Docker images - `latest_non_release`. It is
 created for all unreleased
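As a usage sketch (an editor's addition; `pyfound/black` is the Docker Hub repository these images are published under), a tagged image can be pulled and run against the current directory:

```console
$ docker pull pyfound/black:latest_release
$ docker run --rm --volume "$(pwd):/src" --workdir /src pyfound/black:latest_release black .
```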
@@ -70,17 +70,17 @@ See also [the style documentation](labels/line-length).

 Python versions that should be supported by Black's output. You can run `black --help`
 and look for the `--target-version` option to see the full list of supported versions.
-You should include all versions that your code supports. If you support Python 3.8
-through 3.11, you should write:
+You should include all versions that your code supports. If you support Python 3.11
+through 3.13, you should write:

 ```console
-$ black -t py38 -t py39 -t py310 -t py311
+$ black -t py311 -t py312 -t py313
 ```

 In a [configuration file](#configuration-via-a-file), you can write:

 ```toml
-target-version = ["py38", "py39", "py310", "py311"]
+target-version = ["py311", "py312", "py313"]
 ```

 By default, Black will infer target versions from the project metadata in
@@ -269,8 +269,8 @@ configuration file for consistent results across environments.

 ```console
 $ black --version
-black, 24.4.1 (compiled: yes)
+black, 25.1.0 (compiled: yes)
-$ black --required-version 24.4.1 -c "format = 'this'"
+$ black --required-version 25.1.0 -c "format = 'this'"
 format = "this"
 $ black --required-version 31.5b2 -c "still = 'beta?!'"
 Oh no! 💥 💔 💥 The required version does not match the running version!

@@ -366,7 +366,7 @@ You can check the version of _Black_ you have installed using the `--version` flag.

 ```console
 $ black --version
-black, 24.4.1
+black, 25.1.0
 ```

 #### `--config`
@@ -478,9 +478,10 @@ operating system, this configuration file should be stored as:
 `XDG_CONFIG_HOME` environment variable is not set)

 Note that these are paths to the TOML file itself (meaning that they shouldn't be named
-as `pyproject.toml`), not directories where you store the configuration. Here, `~`
-refers to the path to your home directory. On Windows, this will be something like
-`C:\\Users\UserName`.
+as `pyproject.toml`), not directories where you store the configuration (i.e.,
+`black`/`.black` is the file to create and add your configuration options to, in the
+`~/.config/` directory). Here, `~` refers to the path to your home directory. On
+Windows, this will be something like `C:\\Users\UserName`.

 You can also explicitly specify the path to a particular file that you want with
 `--config`. In this situation _Black_ will not look for any other file.
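A minimal sketch of such a user-level file (an editor's illustration; the option values are arbitrary examples, and the file is assumed to carry the same `[tool.black]` table a `pyproject.toml` would):

```toml
# Contents of ~/.config/black (example values only)
[tool.black]
line-length = 100
target-version = ["py311"]
```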
@@ -7,15 +7,16 @@
 import venv
 import zipfile
 from argparse import ArgumentParser, Namespace
+from collections.abc import Generator
 from concurrent.futures import ThreadPoolExecutor
 from functools import lru_cache, partial
 from pathlib import Path
-from typing import Generator, List, NamedTuple, Optional, Tuple, Union, cast
+from typing import NamedTuple, Optional, Union, cast
 from urllib.request import urlopen, urlretrieve

 PYPI_INSTANCE = "https://pypi.org/pypi"
 PYPI_TOP_PACKAGES = (
-    "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-30-days.min.json"
+    "https://hugovk.github.io/top-pypi-packages/top-pypi-packages.min.json"
 )
 INTERNAL_BLACK_REPO = f"{tempfile.gettempdir()}/__black"

@@ -54,7 +55,7 @@ def get_pypi_download_url(package: str, version: Optional[str]) -> str:
     return cast(str, source["url"])


-def get_top_packages() -> List[str]:
+def get_top_packages() -> list[str]:
     with urlopen(PYPI_TOP_PACKAGES) as page:
         result = json.load(page)

@@ -150,7 +151,7 @@ def git_switch_branch(
     subprocess.run(args, cwd=repo)


-def init_repos(options: Namespace) -> Tuple[Path, ...]:
+def init_repos(options: Namespace) -> tuple[Path, ...]:
     options.output.mkdir(exist_ok=True)

     if options.top_packages:

@@ -206,7 +207,7 @@ def format_repo_with_version(
     git_switch_branch(black_version.version, repo=black_repo)
     git_switch_branch(current_branch, repo=repo, new=True, from_branch=from_branch)

-    format_cmd: List[Union[Path, str]] = [
+    format_cmd: list[Union[Path, str]] = [
         black_runner(black_version.version, black_repo),
         (black_repo / "black.py").resolve(),
         ".",

@@ -222,7 +223,7 @@ def format_repo_with_version(
     return current_branch


-def format_repos(repos: Tuple[Path, ...], options: Namespace) -> None:
+def format_repos(repos: tuple[Path, ...], options: Namespace) -> None:
     black_versions = tuple(
         BlackVersion(*version.split(":")) for version in options.versions
     )
@@ -21,7 +21,7 @@ endif

 if v:version < 700 || !has('python3')
   func! __BLACK_MISSING()
-    echo "The black.vim plugin requires vim7.0+ with Python 3.6 support."
+    echo "The black.vim plugin requires vim7.0+ with Python 3.9 support."
   endfunc
   command! Black :call __BLACK_MISSING()
   command! BlackUpgrade :call __BLACK_MISSING()

@@ -72,12 +72,11 @@ endif

 function BlackComplete(ArgLead, CmdLine, CursorPos)
   return [
-\ 'target_version=py27',
-\ 'target_version=py36',
-\ 'target_version=py37',
-\ 'target_version=py38',
 \ 'target_version=py39',
 \ 'target_version=py310',
+\ 'target_version=py311',
+\ 'target_version=py312',
+\ 'target_version=py313',
 \ ]
 endfunction

@@ -7,14 +7,15 @@

 [tool.black]
 line-length = 88
-target-version = ['py38']
+target-version = ['py39']
 include = '\.pyi?$'
 extend-exclude = '''
 /(
   # The following are specific to Black, you probably don't want those.
-  tests/data
-  | profiling
-)/
+  tests/data/
+  | profiling/
+  | scripts/generate_schema.py  # Uses match syntax
+)
 '''
 # We use the unstable style for formatting Black itself. If you
 # want bug-free formatting, you should keep this off. If you want

@@ -32,8 +33,8 @@ build-backend = "hatchling.build"
 [project]
 name = "black"
 description = "The uncompromising code formatter."
-license = { text = "MIT" }
-requires-python = ">=3.8"
+license = "MIT"
+requires-python = ">=3.9"
 authors = [
     { name = "Łukasz Langa", email = "lukasz@langa.pl" },
 ]

@@ -54,11 +55,11 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Topic :: Software Development :: Libraries :: Python Modules",
     "Topic :: Software Development :: Quality Assurance",
 ]

@@ -68,6 +69,7 @@ dependencies = [
     "packaging>=22.0",
     "pathspec>=0.9.0",
     "platformdirs>=2",
+    "pytokens>=0.1.10",
     "tomli>=1.1.0; python_version < '3.11'",
     "typing_extensions>=4.0.1; python_version < '3.11'",
 ]
@@ -76,10 +78,7 @@ dynamic = ["readme", "version"]
 [project.optional-dependencies]
 colorama = ["colorama>=0.4.3"]
 uvloop = ["uvloop>=0.15.2"]
-d = [
-    "aiohttp>=3.7.4; sys_platform != 'win32' or implementation_name != 'pypy'",
-    "aiohttp>=3.7.4, !=3.9.0; sys_platform == 'win32' and implementation_name == 'pypy'",
-]
+d = ["aiohttp>=3.10"]
 jupyter = [
     "ipython>=7.8.0",
     "tokenize-rt>=3.2.0",

@@ -93,8 +92,10 @@ blackd = "blackd:patched_main [d]"
 black = "black.schema:get_schema"

 [project.urls]
+Documentation = "https://black.readthedocs.io/"
 Changelog = "https://github.com/psf/black/blob/main/CHANGES.md"
-Homepage = "https://github.com/psf/black"
+Repository = "https://github.com/psf/black"
+Issues = "https://github.com/psf/black/issues"

 [tool.hatch.metadata.hooks.fancy-pypi-readme]
 content-type = "text/markdown"

@@ -125,8 +126,8 @@ macos-max-compat = true
 enable-by-default = false
 dependencies = [
     "hatch-mypyc>=0.16.0",
-    "mypy==1.7.1",
-    "click==8.1.3",  # avoid https://github.com/pallets/click/issues/2558
+    "mypy>=1.12",
+    "click>=8.1.7",
 ]
 require-runtime-dependencies = true
 exclude = [

@@ -149,12 +150,14 @@ options = { debug_level = "0" }

 [tool.cibuildwheel]
 build-verbosity = 1

 # So these are the environments we target:
-# - Python: CPython 3.8+ only
+# - Python: CPython 3.9+ only
 # - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64
 # - OS: Linux (no musl), Windows, and macOS
 build = "cp3*"
 skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp*"]

 # This is the bare minimum needed to run the test suite. Pulling in the full
 # test_requirements.txt would download a bunch of other packages not necessary
 # here and would slow down the testing step a fair bit.

@@ -169,11 +172,9 @@ test-skip = ["*-macosx_arm64", "*-macosx_universal2:arm64"]
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension
-# from source also doesn't work.
-AIOHTTP_NO_EXTENSIONS = "1"

 [tool.cibuildwheel.linux]
+manylinux-x86_64-image = "manylinux_2_28"
 before-build = [
     "yum install -y clang gcc",
 ]
@@ -182,19 +183,10 @@ before-build = [
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-AIOHTTP_NO_EXTENSIONS = "1"

 # Black needs Clang to compile successfully on Linux.
 CC = "clang"

-[tool.cibuildwheel.macos]
-build-frontend = { name = "build", args = ["--no-isolation"] }
-# Unfortunately, hatch doesn't respect MACOSX_DEPLOYMENT_TARGET
-before-build = [
-    "python -m pip install 'hatchling==1.20.0' hatch-vcs hatch-fancy-pypi-readme 'hatch-mypyc>=0.16.0' 'mypy==1.7.1' 'click==8.1.3'",
-    """sed -i '' -e "600,700s/'10_16'/os.environ['MACOSX_DEPLOYMENT_TARGET'].replace('.', '_')/" $(python -c 'import hatchling.builders.wheel as h; print(h.__file__)') """,
-]
-
 [tool.isort]
 atomic = true
 profile = "black"

@@ -214,23 +206,7 @@ markers = [
     "incompatible_with_mypyc: run when testing mypyc compiled black"
 ]
 xfail_strict = true
-filterwarnings = [
-    "error",
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/2974/
-    # this ignore can be removed when support for aiohttp 3.7 is dropped.
-    '''ignore:Decorator `@unittest_run_loop` is no longer needed in aiohttp 3\.8\+:DeprecationWarning''',
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/
-    # this ignore can be removed when support for aiohttp 3.x is dropped.
-    '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''',
-    # aiohttp is using deprecated cgi modules - Safe to remove when fixed:
-    # https://github.com/aio-libs/aiohttp/issues/6905
-    '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''',
-    # Work around https://github.com/pytest-dev/pytest/issues/10977 for Python 3.12
-    '''ignore:(Attribute s|Attribute n|ast.Str|ast.Bytes|ast.NameConstant|ast.Num) is deprecated and will be removed in Python 3.14:DeprecationWarning''',
-    # Will be fixed with aiohttp 3.9.0
-    # https://github.com/aio-libs/aiohttp/pull/7302
-    "ignore:datetime.*utcfromtimestamp\\(\\) is deprecated and scheduled for removal:DeprecationWarning",
-]
+filterwarnings = ["error"]
 [tool.coverage.report]
 omit = [
     "src/blib2to3/*",

@@ -246,9 +222,11 @@ branch = true
 # Specify the target platform details in config, so your developers are
 # free to run mypy on Windows, Linux, or macOS and get consistent
 # results.
-python_version = "3.8"
+python_version = "3.9"
 mypy_path = "src"
 strict = true
+strict_bytes = true
+local_partial_types = true
 # Unreachable blocks have been an issue when compiling mypyc, let's try to avoid 'em in the first place.
 warn_unreachable = true
 implicit_reexport = true
@@ -24,17 +24,12 @@
 from base64 import b64encode
 from io import BytesIO
 from pathlib import Path
-from typing import Any
+from typing import Any, Final, Literal

 import click
 import urllib3
 from packaging.version import Version

-if sys.version_info >= (3, 8):
-    from typing import Final, Literal
-else:
-    from typing_extensions import Final, Literal
-
 COMMENT_FILE: Final = ".pr-comment.json"
 DIFF_STEP_NAME: Final = "Generate HTML diff report"
 DOCS_URL: Final = (
@@ -5,14 +5,11 @@
 a coverage-guided fuzzer I'm working on.
 """

-import re
-
 import hypothesmith
 from hypothesis import HealthCheck, given, settings
 from hypothesis import strategies as st

 import black
-from blib2to3.pgen2.tokenize import TokenError


 # This test uses the Hypothesis and Hypothesmith libraries to generate random

@@ -45,23 +42,7 @@ def test_idempotent_any_syntatically_valid_python(
     compile(src_contents, "<string>", "exec")  # else the bug is in hypothesmith

     # Then format the code...
-    try:
-        dst_contents = black.format_str(src_contents, mode=mode)
-    except black.InvalidInput:
-        # This is a bug - if it's valid Python code, as above, Black should be
-        # able to cope with it. See issues #970, #1012
-        # TODO: remove this try-except block when issues are resolved.
-        return
-    except TokenError as e:
-        if (  # Special-case logic for backslashes followed by newlines or end-of-input
-            e.args[0] == "EOF in multi-line statement"
-            and re.search(r"\\($|\r?\n)", src_contents) is not None
-        ):
-            # This is a bug - if it's valid Python code, as above, Black should be
-            # able to cope with it. See issue #1012.
-            # TODO: remove this block when the issue is resolved.
-            return
-        raise
+    dst_contents = black.format_str(src_contents, mode=mode)

     # And check that we got equivalent and stable output.
     black.assert_equivalent(src_contents, dst_contents)

@@ -80,7 +61,7 @@ def test_idempotent_any_syntatically_valid_python(
 try:
     import sys

-    import atheris  # type: ignore[import-not-found]
+    import atheris
 except ImportError:
     pass
 else:
@@ -53,7 +53,7 @@ def main(schemastore: bool, outfile: IO[str]) -> None:
     schema: dict[str, Any] = {
         "$schema": "http://json-schema.org/draft-07/schema#",
         "$id": (
-            "https://github.com/psf/black/blob/main/black/resources/black.schema.json"
+            "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json"
         ),
         "$comment": "tool.black table in pyproject.toml",
         "type": "object",
@@ -17,13 +17,13 @@
 """

 import sys
+from collections.abc import Iterable
 from os.path import basename, dirname, join
-from typing import Iterable, Tuple

 import wcwidth  # type: ignore[import-not-found]


-def make_width_table() -> Iterable[Tuple[int, int, int]]:
+def make_width_table() -> Iterable[tuple[int, int, int]]:
     start_codepoint = -1
     end_codepoint = -1
     range_width = -2

@@ -53,9 +53,9 @@ def main() -> None:
         f.write(f"""# Generated by {basename(__file__)}
 # wcwidth {wcwidth.__version__}
 # Unicode {wcwidth.list_versions()[-1]}
-from typing import Final, List, Tuple
+from typing import Final

-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
 """)
         for triple in make_width_table():
             f.write(f"    {triple!r},\n")
@@ -9,7 +9,7 @@


 def git(*args: str) -> str:
-    return check_output(["git"] + list(args)).decode("utf8").strip()
+    return check_output(["git", *args]).decode("utf8").strip()


 def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:

@@ -26,19 +26,19 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
     merge_base = git("merge-base", "HEAD", base_branch)
     if not merge_base:
         logger.error(
-            "Could not find a common commit for current head and %s" % base_branch
+            f"Could not find a common commit for current head and {base_branch}"
         )
         return 1

     commits = git(
-        "log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base
+        "log", "--reverse", "--pretty=format:%H", f"{merge_base}~1..HEAD"
     ).split()
     for commit in commits:
-        git("checkout", commit, "-b%s-black" % commit)
+        git("checkout", commit, f"-b{commit}-black")
         check_output(black_command, shell=True)
         git("commit", "-aqm", "blackify")

-    git("checkout", base_branch, "-b%s-black" % current_branch)
+    git("checkout", base_branch, f"-b{current_branch}-black")

     for last_commit, commit in zip(commits, commits[1:]):
         allow_empty = (

@@ -51,7 +51,7 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
             "diff",
             "--binary",
             "--find-copies",
-            "%s-black..%s-black" % (last_commit, commit),
+            f"{last_commit}-black..{commit}-black",
         ],
         stdout=PIPE,
     )

@@ -77,7 +77,7 @@ def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int:
         git("commit", "--allow-empty", "-aqC", commit)

     for commit in commits:
-        git("branch", "-qD", "%s-black" % commit)
+        git("branch", "-qD", f"{commit}-black")

     return 0

@@ -11,8 +11,7 @@
 import sys
 from datetime import datetime
 from pathlib import Path
-from subprocess import PIPE, run
-from typing import List
+from subprocess import run

 LOG = logging.getLogger(__name__)
 NEW_VERSION_CHANGELOG_TEMPLATE = """\

@@ -70,9 +69,9 @@ class NoGitTagsError(Exception): ...  # noqa: E701,E761

 # TODO: Do better with alpha + beta releases
 # Maybe we vendor packaging library
-def get_git_tags(versions_only: bool = True) -> List[str]:
+def get_git_tags(versions_only: bool = True) -> list[str]:
     """Pull out all tags or calvers only"""
-    cp = run(["git", "tag"], stdout=PIPE, stderr=PIPE, check=True, encoding="utf8")
+    cp = run(["git", "tag"], capture_output=True, check=True, encoding="utf8")
     if not cp.stdout:
         LOG.error(f"Returned no git tags stdout: {cp.stderr}")
         raise NoGitTagsError
@@ -5,28 +5,22 @@
 import sys
 import tokenize
 import traceback
+from collections.abc import (
+    Collection,
+    Generator,
+    Iterator,
+    MutableMapping,
+    Sequence,
+    Sized,
+)
 from contextlib import contextmanager
 from dataclasses import replace
 from datetime import datetime, timezone
 from enum import Enum
 from json.decoder import JSONDecodeError
 from pathlib import Path
-from typing import (
-    Any,
-    Collection,
-    Dict,
-    Generator,
-    Iterator,
-    List,
-    MutableMapping,
-    Optional,
-    Pattern,
-    Sequence,
-    Set,
-    Sized,
-    Tuple,
-    Union,
-)
+from re import Pattern
+from typing import Any, Optional, Union

 import click
 from click.core import ParameterSource

@@ -57,12 +51,12 @@
 )
 from black.handle_ipynb_magics import (
     PYTHON_CELL_MAGICS,
-    TRANSFORMED_MAGICS,
     jupyter_dependencies_are_installed,
     mask_cell,
     put_trailing_semicolon_back,
     remove_trailing_semicolon,
     unmask_cell,
+    validate_cell,
 )
 from black.linegen import LN, LineGenerator, transform_line
 from black.lines import EmptyLineTracker, LinesBlock
@@ -176,7 +170,7 @@ def read_pyproject_toml(
             "line-ranges", "Cannot use line-ranges in the pyproject.toml file."
         )

-    default_map: Dict[str, Any] = {}
+    default_map: dict[str, Any] = {}
     if ctx.default_map:
         default_map.update(ctx.default_map)
     default_map.update(config)

@@ -186,9 +180,9 @@ def read_pyproject_toml(


 def spellcheck_pyproject_toml_keys(
-    ctx: click.Context, config_keys: List[str], config_file_path: str
+    ctx: click.Context, config_keys: list[str], config_file_path: str
 ) -> None:
-    invalid_keys: List[str] = []
+    invalid_keys: list[str] = []
     available_config_options = {param.name for param in ctx.command.params}
     for key in config_keys:
         if key not in available_config_options:

@@ -202,8 +196,8 @@ def spellcheck_pyproject_toml_keys(


 def target_version_option_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[TargetVersion]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[TargetVersion]:
     """Compute the target versions from a --target-version flag.

     This is its own function because mypy couldn't infer the type correctly

@@ -213,8 +207,8 @@ def target_version_option_callback(


 def enable_unstable_feature_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[Preview]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[Preview]:
     """Compute the features from an --enable-unstable-feature flag."""
     return [Preview[val] for val in v]

@@ -519,7 +513,7 @@ def main(  # noqa: C901
     ctx: click.Context,
     code: Optional[str],
     line_length: int,
-    target_version: List[TargetVersion],
+    target_version: list[TargetVersion],
     check: bool,
     diff: bool,
     line_ranges: Sequence[str],

@@ -533,7 +527,7 @@ def main(  # noqa: C901
     skip_magic_trailing_comma: bool,
     preview: bool,
     unstable: bool,
-    enable_unstable_feature: List[Preview],
+    enable_unstable_feature: list[Preview],
     quiet: bool,
     verbose: bool,
     required_version: Optional[str],

@@ -543,12 +537,21 @@ def main(  # noqa: C901
     force_exclude: Optional[Pattern[str]],
     stdin_filename: Optional[str],
     workers: Optional[int],
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     config: Optional[str],
 ) -> None:
     """The uncompromising code formatter."""
     ctx.ensure_object(dict)

+    assert sys.version_info >= (3, 9), "Black requires Python 3.9+"
+    if sys.version_info[:3] == (3, 12, 5):
+        out(
+            "Python 3.12.5 has a memory safety issue that can cause Black's "
+            "AST safety checks to fail. "
+            "Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4"
+        )
+        ctx.exit(1)
+
     if src and code is not None:
         out(
             main.get_usage(ctx)
@@ -634,7 +637,7 @@ def main(  # noqa: C901
         enabled_features=set(enable_unstable_feature),
     )

-    lines: List[Tuple[int, int]] = []
+    lines: list[tuple[int, int]] = []
     if line_ranges:
         if ipynb:
             err("Cannot use --line-ranges with ipynb files.")

@@ -724,7 +727,7 @@ def main(  # noqa: C901
 def get_sources(
     *,
     root: Path,
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     quiet: bool,
     verbose: bool,
     include: Pattern[str],

@@ -733,19 +736,25 @@ def get_sources(
     force_exclude: Optional[Pattern[str]],
     report: "Report",
     stdin_filename: Optional[str],
-) -> Set[Path]:
+) -> set[Path]:
     """Compute the set of files to be formatted."""
-    sources: Set[Path] = set()
+    sources: set[Path] = set()

     assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
     using_default_exclude = exclude is None
     exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
-    gitignore: Optional[Dict[Path, PathSpec]] = None
+    gitignore: Optional[dict[Path, PathSpec]] = None
     root_gitignore = get_gitignore(root)

     for s in src:
         if s == "-" and stdin_filename:
             path = Path(stdin_filename)
+            if path_is_excluded(stdin_filename, force_exclude):
+                report.path_ignored(
+                    path,
+                    "--stdin-filename matches the --force-exclude regular expression",
+                )
+                continue
             is_stdin = True
         else:
             path = Path(s)
@@ -832,7 +841,7 @@ def reformat_code(
     mode: Mode,
     report: Report,
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """
     Reformat and print out `content` without spawning child processes.

@@ -865,7 +874,7 @@ def reformat_one(
     mode: Mode,
     report: "Report",
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Reformat a single file under `src` without spawning child processes.

@@ -921,7 +930,7 @@ def format_file_in_place(
     write_back: WriteBack = WriteBack.NO,
     lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file under `src` path. Return True if changed.

@@ -988,7 +997,7 @@ def format_stdin_to_stdout(
     content: Optional[str] = None,
     write_back: WriteBack = WriteBack.NO,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file on stdin. Return True if changed.

@@ -1039,7 +1048,7 @@ def check_stability_and_equivalence(
     dst_contents: str,
     *,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Perform stability and equivalence checks.

@@ -1056,7 +1065,7 @@ def format_file_contents(
     *,
     fast: bool,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> FileContent:
     """Reformat contents of a file and return new contents.
@@ -1079,32 +1088,6 @@ def format_file_contents(
     return dst_contents


-def validate_cell(src: str, mode: Mode) -> None:
-    """Check that cell does not already contain TransformerManager transformations,
-    or non-Python cell magics, which might cause tokenizer_rt to break because of
-    indentations.
-
-    If a cell contains ``!ls``, then it'll be transformed to
-    ``get_ipython().system('ls')``. However, if the cell originally contained
-    ``get_ipython().system('ls')``, then it would get transformed in the same way:
-
-        >>> TransformerManager().transform_cell("get_ipython().system('ls')")
-        "get_ipython().system('ls')\n"
-        >>> TransformerManager().transform_cell("!ls")
-        "get_ipython().system('ls')\n"
-
-    Due to the impossibility of safely roundtripping in such situations, cells
-    containing transformed magics will be ignored.
-    """
-    if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
-        raise NothingChanged
-    if (
-        src[:2] == "%%"
-        and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics
-    ):
-        raise NothingChanged
-
-
 def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
     """Format code in given cell of Jupyter notebook.
@@ -1187,7 +1170,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:


 def format_str(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     """Reformat a string and return new contents.

@@ -1234,10 +1217,10 @@ def f(


 def _format_str_once(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
-    dst_blocks: List[LinesBlock] = []
+    dst_blocks: list[LinesBlock] = []
     if mode.target_versions:
         versions = mode.target_versions
     else:

@@ -1287,7 +1270,7 @@ def _format_str_once(
     return "".join(dst_contents)


-def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:
     """Return a tuple of (decoded_contents, encoding, newline).

     `newline` is either CRLF or LF but `decoded_contents` is decoded with
@@ -1305,8 +1288,8 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:


 def get_features_used(  # noqa: C901
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[Feature]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[Feature]:
     """Return a set of (relatively) new Python features used in this file.

     Currently looking for:

@@ -1324,7 +1307,7 @@ def get_features_used(  # noqa: C901
     - except* clause;
     - variadic generics;
     """
-    features: Set[Feature] = set()
+    features: set[Feature] = set()
     if future_imports:
         features |= {
             FUTURE_FLAG_TO_FEATURE[future_import]

@@ -1462,8 +1445,8 @@ def _contains_asexpr(node: Union[Node, Leaf]) -> bool:


 def detect_target_versions(
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[TargetVersion]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[TargetVersion]:
     """Detect the version to target based on the nodes used."""
     features = get_features_used(node, future_imports=future_imports)
     return {
@@ -1471,11 +1454,11 @@ def detect_target_versions(
     }


-def get_future_imports(node: Node) -> Set[str]:
+def get_future_imports(node: Node) -> set[str]:
     """Return a set of __future__ imports in the file."""
-    imports: Set[str] = set()
+    imports: set[str] = set()

-    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+    def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
         for child in children:
             if isinstance(child, Leaf):
                 if child.type == token.NAME:
@@ -1521,6 +1504,13 @@ def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
     return imports


+def _black_info() -> str:
+    return (
+        f"Black {__version__} on "
+        f"Python ({platform.python_implementation()}) {platform.python_version()}"
+    )
+
+
 def assert_equivalent(src: str, dst: str) -> None:
     """Raise AssertionError if `src` and `dst` aren't equivalent."""
     try:
@@ -1538,7 +1528,7 @@ def assert_equivalent(src: str, dst: str) -> None:
     except Exception as exc:
         log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
         raise ASTSafetyError(
-            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
+            f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. "
             "Please report a bug on https://github.com/psf/black/issues. "
             f"This invalid output might be helpful: {log}"
         ) from None
@@ -1548,14 +1538,14 @@ def assert_equivalent(src: str, dst: str) -> None:
     if src_ast_str != dst_ast_str:
         log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
         raise ASTSafetyError(
-            "INTERNAL ERROR: Black produced code that is not equivalent to the"
-            " source. Please report a bug on "
-            f"https://github.com/psf/black/issues. This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to"
+            " the source. Please report a bug on https://github.com/psf/black/issues."
+            f" This diff might be helpful: {log}"
         ) from None
 
 
 def assert_stable(
-    src: str, dst: str, mode: Mode, *, lines: Collection[Tuple[int, int]] = ()
+    src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
 ) -> None:
     """Raise AssertionError if `dst` reformats differently the second time."""
     if lines:
@@ -1576,9 +1566,9 @@ def assert_stable(
             diff(dst, newdst, "first pass", "second pass"),
         )
         raise AssertionError(
-            "INTERNAL ERROR: Black produced different code on the second pass of the"
-            " formatter. Please report a bug on https://github.com/psf/black/issues."
-            f" This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced different code on the second"
+            " pass of the formatter. Please report a bug on"
+            f" https://github.com/psf/black/issues. This diff might be helpful: {log}"
         ) from None
 
 
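Most of this compare is a mechanical migration to PEP 585 builtin generics (`set[str]` instead of `typing.Set[str]`), which are valid in annotations on Python 3.9 and later. A minimal before/after sketch with illustrative names:

```python
from typing import List, Set  # old spellings; deprecated aliases since 3.9


def old_style(items: List[str]) -> Set[str]:
    return set(items)


def new_style(items: list[str]) -> set[str]:  # builtin generics, PEP 585
    return set(items)


assert old_style(["a", "b"]) == new_style(["a", "b"]) == {"a", "b"}
```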
--- a/src/black/_width_table.py
+++ b/src/black/_width_table.py
@@ -1,9 +1,9 @@
 # Generated by make_width_table.py
 # wcwidth 0.2.6
 # Unicode 15.0.0
-from typing import Final, List, Tuple
+from typing import Final
 
-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
     (0, 0, 0),
     (1, 31, -1),
     (127, 159, -1),
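`WIDTH_TABLE` is a sorted list of `(start, end, width)` codepoint ranges. A hedged sketch of how such a table can be queried with `bisect`; the lookup helper below is illustrative and not Black's actual implementation, which is outside this hunk:

```python
from bisect import bisect_right

# Excerpt of the table from the hunk above: (start, end, width) triples.
TABLE = [(0, 0, 0), (1, 31, -1), (127, 159, -1)]


def char_width(codepoint: int) -> int:
    """Find the range containing `codepoint` and return its width, else 1."""
    idx = bisect_right(TABLE, (codepoint, float("inf"), 0)) - 1
    if idx >= 0:
        start, end, width = TABLE[idx]
        if start <= codepoint <= end:
            return width
    return 1  # default width for codepoints not covered by the table


print(char_width(65), char_width(130))  # 1 -1
```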
--- a/src/black/brackets.py
+++ b/src/black/brackets.py
@@ -1,7 +1,8 @@
 """Builds on top of nodes.py to track brackets."""
 
+from collections.abc import Iterable, Sequence
 from dataclasses import dataclass, field
-from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union
+from typing import Final, Optional, Union
 
 from black.nodes import (
     BRACKET,
@@ -60,12 +61,12 @@ class BracketTracker:
     """Keeps track of brackets on a line."""
 
     depth: int = 0
-    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
-    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
+    bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
+    delimiters: dict[LeafID, Priority] = field(default_factory=dict)
     previous: Optional[Leaf] = None
-    _for_loop_depths: List[int] = field(default_factory=list)
-    _lambda_argument_depths: List[int] = field(default_factory=list)
-    invisible: List[Leaf] = field(default_factory=list)
+    _for_loop_depths: list[int] = field(default_factory=list)
+    _lambda_argument_depths: list[int] = field(default_factory=list)
+    invisible: list[Leaf] = field(default_factory=list)
 
     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -353,7 +354,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
         return 0
 
 
-def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
+def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
     """Return leaves that are inside matching brackets.
 
     The input `leaves` can have non-matching brackets at the head or tail parts.
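The other half of the cleanup moves abstract container types to `collections.abc`, since the `typing` aliases for them are deprecated from Python 3.9. A small self-contained sketch of the new import style (names are illustrative):

```python
from collections.abc import Iterable, Sequence
from typing import Optional


def first_or_none(items: Sequence[int]) -> Optional[int]:
    return items[0] if items else None


def total(items: Iterable[int]) -> int:
    return sum(items)


print(first_or_none([3, 1]), total(range(4)))  # 3 6
```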
--- a/src/black/cache.py
+++ b/src/black/cache.py
@@ -5,9 +5,10 @@
 import pickle
 import sys
 import tempfile
+from collections.abc import Iterable
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Iterable, NamedTuple, Set, Tuple
+from typing import NamedTuple
 
 from platformdirs import user_cache_dir
 
@@ -55,7 +56,7 @@ def get_cache_file(mode: Mode) -> Path:
 class Cache:
     mode: Mode
     cache_file: Path
-    file_data: Dict[str, FileData] = field(default_factory=dict)
+    file_data: dict[str, FileData] = field(default_factory=dict)
 
     @classmethod
     def read(cls, mode: Mode) -> Self:
@@ -76,7 +77,7 @@ def read(cls, mode: Mode) -> Self:
 
         with cache_file.open("rb") as fobj:
             try:
-                data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
+                data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
                 file_data = {k: FileData(*v) for k, v in data.items()}
             except (pickle.UnpicklingError, ValueError, IndexError):
                 return cls(mode, cache_file)
@@ -114,14 +115,14 @@ def is_changed(self, source: Path) -> bool:
                 return True
         return False
 
-    def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
        """Split an iterable of paths in `sources` into two sets.
 
         The first contains paths of files that modified on disk or are not in the
         cache. The other contains paths to non-modified files.
         """
-        changed: Set[Path] = set()
-        done: Set[Path] = set()
+        changed: set[Path] = set()
+        done: set[Path] = set()
         for src in sources:
             if self.is_changed(src):
                 changed.add(src)
@@ -139,9 +140,8 @@ def write(self, sources: Iterable[Path]) -> None:
         with tempfile.NamedTemporaryFile(
             dir=str(self.cache_file.parent), delete=False
         ) as f:
-            # We store raw tuples in the cache because pickling NamedTuples
-            # doesn't work with mypyc on Python 3.8, and because it's faster.
-            data: Dict[str, Tuple[float, int, str]] = {
+            # We store raw tuples in the cache because it's faster.
+            data: dict[str, tuple[float, int, str]] = {
                 k: (*v,) for k, v in self.file_data.items()
            }
             pickle.dump(data, f, protocol=4)
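The cache hunks keep pickling raw tuples rather than `FileData` NamedTuples; with Python 3.8 support dropped, only the speed rationale remains. An illustrative round trip of that encode/decode, assuming `FileData` holds `(st_mtime, st_size, hash)` as the annotations above suggest:

```python
import pickle
from typing import NamedTuple


class FileData(NamedTuple):  # assumed field layout, mirroring the annotations above
    st_mtime: float
    st_size: int
    hash: str


file_data = {"a.py": FileData(1.0, 42, "deadbeef")}
# Down-convert to plain tuples before pickling, as Cache.write does:
raw: dict[str, tuple[float, int, str]] = {k: (*v,) for k, v in file_data.items()}
blob = pickle.dumps(raw, protocol=4)
# Rebuild NamedTuples on load, as Cache.read does:
restored = {k: FileData(*v) for k, v in pickle.loads(blob).items()}
assert restored == file_data
```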
--- a/src/black/comments.py
+++ b/src/black/comments.py
@@ -1,7 +1,8 @@
 import re
+from collections.abc import Collection, Iterator
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import Collection, Final, Iterator, List, Optional, Tuple, Union
+from typing import Final, Optional, Union
 
 from black.mode import Mode, Preview
 from black.nodes import (
@@ -77,9 +78,9 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]:
 
 
 @lru_cache(maxsize=4096)
-def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
     """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
-    result: List[ProtoComment] = []
+    result: list[ProtoComment] = []
     if not prefix or "#" not in prefix:
         return result
 
@@ -166,7 +167,7 @@ def make_comment(content: str) -> str:
 
 
 def normalize_fmt_off(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> None:
     """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
     try_again = True
@@ -175,7 +176,7 @@ def normalize_fmt_off(
 
 
 def convert_one_fmt_off_pair(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> bool:
     """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
 
@@ -184,24 +185,24 @@ def convert_one_fmt_off_pair(
     for leaf in node.leaves():
         previous_consumed = 0
         for comment in list_comments(leaf.prefix, is_endmarker=False):
-            should_pass_fmt = comment.value in FMT_OFF or _contains_fmt_skip_comment(
-                comment.value, mode
-            )
-            if not should_pass_fmt:
+            is_fmt_off = comment.value in FMT_OFF
+            is_fmt_skip = _contains_fmt_skip_comment(comment.value, mode)
+            if (not is_fmt_off and not is_fmt_skip) or (
+                # Invalid use when `# fmt: off` is applied before a closing bracket.
+                is_fmt_off
+                and leaf.type in CLOSING_BRACKETS
+            ):
                 previous_consumed = comment.consumed
                 continue
             # We only want standalone comments. If there's no previous leaf or
             # the previous leaf is indentation, it's a standalone comment in
             # disguise.
-            if should_pass_fmt and comment.type != STANDALONE_COMMENT:
+            if comment.type != STANDALONE_COMMENT:
                 prev = preceding_leaf(leaf)
                 if prev:
-                    if comment.value in FMT_OFF and prev.type not in WHITESPACE:
+                    if is_fmt_off and prev.type not in WHITESPACE:
                         continue
-                    if (
-                        _contains_fmt_skip_comment(comment.value, mode)
-                        and prev.type in WHITESPACE
-                    ):
+                    if is_fmt_skip and prev.type in WHITESPACE:
                         continue
 
             ignored_nodes = list(generate_ignored_nodes(leaf, comment, mode))
@@ -213,7 +214,7 @@ def convert_one_fmt_off_pair(
             prefix = first.prefix
             if comment.value in FMT_OFF:
                 first.prefix = prefix[comment.consumed :]
-            if _contains_fmt_skip_comment(comment.value, mode):
+            if is_fmt_skip:
                 first.prefix = ""
                 standalone_comment_prefix = prefix
             else:
@@ -233,12 +234,8 @@ def convert_one_fmt_off_pair(
                 fmt_off_prefix = fmt_off_prefix.split("\n")[-1]
             standalone_comment_prefix += fmt_off_prefix
             hidden_value = comment.value + "\n" + hidden_value
-            if _contains_fmt_skip_comment(comment.value, mode):
-                hidden_value += (
-                    comment.leading_whitespace
-                    if Preview.no_normalize_fmt_skip_whitespace in mode
-                    else " "
-                ) + comment.value
+            if is_fmt_skip:
+                hidden_value += comment.leading_whitespace + comment.value
             if hidden_value.endswith("\n"):
                 # That happens when one of the `ignored_nodes` ended with a NEWLINE
                 # leaf (possibly followed by a DEDENT).
@@ -273,7 +270,7 @@ def generate_ignored_nodes(
     Stops at the end of the block.
     """
     if _contains_fmt_skip_comment(comment.value, mode):
-        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
+        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, mode)
         return
     container: Optional[LN] = container_of(leaf)
     while container is not None and container.type != token.ENDMARKER:
@@ -312,23 +309,67 @@ def generate_ignored_nodes(
 
 
 def _generate_ignored_nodes_from_fmt_skip(
-    leaf: Leaf, comment: ProtoComment
+    leaf: Leaf, comment: ProtoComment, mode: Mode
 ) -> Iterator[LN]:
     """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
     prev_sibling = leaf.prev_sibling
     parent = leaf.parent
+    ignored_nodes: list[LN] = []
     # Need to properly format the leaf prefix to compare it to comment.value,
     # which is also formatted
     comments = list_comments(leaf.prefix, is_endmarker=False)
     if not comments or comment.value != comments[0].value:
         return
     if prev_sibling is not None:
-        leaf.prefix = ""
-        siblings = [prev_sibling]
-        while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None:
-            prev_sibling = prev_sibling.prev_sibling
-            siblings.insert(0, prev_sibling)
-        yield from siblings
+        leaf.prefix = leaf.prefix[comment.consumed :]
+
+        if Preview.fix_fmt_skip_in_one_liners not in mode:
+            siblings = [prev_sibling]
+            while (
+                "\n" not in prev_sibling.prefix
+                and prev_sibling.prev_sibling is not None
+            ):
+                prev_sibling = prev_sibling.prev_sibling
+                siblings.insert(0, prev_sibling)
+            yield from siblings
+            return
+
+        # Generates the nodes to be ignored by `fmt: skip`.
+
+        # Nodes to ignore are the ones on the same line as the
+        # `# fmt: skip` comment, excluding the `# fmt: skip`
+        # node itself.
+
+        # Traversal process (starting at the `# fmt: skip` node):
+        # 1. Move to the `prev_sibling` of the current node.
+        # 2. If `prev_sibling` has children, go to its rightmost leaf.
+        # 3. If there's no `prev_sibling`, move up to the parent
+        #    node and repeat.
+        # 4. Continue until:
+        #    a. You encounter an `INDENT` or `NEWLINE` node (indicates
+        #       start of the line).
+        #    b. You reach the root node.
+
+        # Include all visited LEAVES in the ignored list, except INDENT
+        # or NEWLINE leaves.
+
+        current_node = prev_sibling
+        ignored_nodes = [current_node]
+        if current_node.prev_sibling is None and current_node.parent is not None:
+            current_node = current_node.parent
+        while "\n" not in current_node.prefix and current_node.prev_sibling is not None:
+            leaf_nodes = list(current_node.prev_sibling.leaves())
+            current_node = leaf_nodes[-1] if leaf_nodes else current_node
+
+            if current_node.type in (token.NEWLINE, token.INDENT):
+                current_node.prefix = ""
+                break
+
+            ignored_nodes.insert(0, current_node)
+
+            if current_node.prev_sibling is None and current_node.parent is not None:
+                current_node = current_node.parent
+        yield from ignored_nodes
     elif (
         parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
     ):
@@ -336,7 +377,6 @@ def _generate_ignored_nodes_from_fmt_skip(
         # statements. The ignored nodes should be previous siblings of the
         # parent suite node.
         leaf.prefix = ""
-        ignored_nodes: List[LN] = []
         parent_sibling = parent.prev_sibling
         while parent_sibling is not None and parent_sibling.type != syms.suite:
             ignored_nodes.insert(0, parent_sibling)
@@ -376,7 +416,7 @@ def children_contains_fmt_on(container: LN) -> bool:
     return False
 
 
-def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
+def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
     """
     Returns:
         True iff one of the comments in @comment_list is a pragma used by one
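Behavior sketch for the new `# fmt: skip` traversal above. Under the preview flag `fix_fmt_skip_in_one_liners`, everything on the physical line before the comment should be collected, which matters for one-liners; the exact preview behavior is inferred here from the flag name and the traversal comments in the hunk:

```python
# One-liner whose formatting the comment should preserve verbatim:
if True: x = 1; y = 2  # fmt: skip

# Without the fix, only the nearest previous sibling was collected; the new
# traversal walks prev_sibling/parent links back to the start of the line
# (stopping at INDENT/NEWLINE) before yielding the ignored nodes.
print(x, y)  # 1 2
```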
--- a/src/black/concurrency.py
+++ b/src/black/concurrency.py
@@ -10,10 +10,11 @@
 import signal
 import sys
 import traceback
+from collections.abc import Iterable
 from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
 from multiprocessing import Manager
 from pathlib import Path
-from typing import Any, Iterable, Optional, Set
+from typing import Any, Optional
 
 from mypy_extensions import mypyc_attr
 
@@ -69,7 +70,7 @@ def shutdown(loop: asyncio.AbstractEventLoop) -> None:
 # not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
 @mypyc_attr(patchable=True)
 def reformat_many(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
@@ -119,7 +120,7 @@ def reformat_many(
 
 
 async def schedule_formatting(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
--- a/src/black/debug.py
+++ b/src/black/debug.py
@@ -1,5 +1,6 @@
+from collections.abc import Iterator
 from dataclasses import dataclass, field
-from typing import Any, Iterator, List, TypeVar, Union
+from typing import Any, TypeVar, Union
 
 from black.nodes import Visitor
 from black.output import out
@@ -14,7 +15,7 @@
 @dataclass
 class DebugVisitor(Visitor[T]):
     tree_depth: int = 0
-    list_output: List[str] = field(default_factory=list)
+    list_output: list[str] = field(default_factory=list)
     print_output: bool = True
 
     def out(self, message: str, *args: Any, **kwargs: Any) -> None:
--- a/src/black/files.py
+++ b/src/black/files.py
@@ -1,21 +1,11 @@
 import io
 import os
 import sys
+from collections.abc import Iterable, Iterator, Sequence
 from functools import lru_cache
 from pathlib import Path
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Sequence,
-    Tuple,
-    Union,
-)
+from re import Pattern
+from typing import TYPE_CHECKING, Any, Optional, Union
 
 from mypy_extensions import mypyc_attr
 from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet
@@ -43,7 +33,7 @@
 
 
 @lru_cache
-def _load_toml(path: Union[Path, str]) -> Dict[str, Any]:
+def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
     with open(path, "rb") as f:
         return tomllib.load(f)
 
@@ -56,9 +46,12 @@ def _cached_resolve(path: Path) -> Path:
 @lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
-) -> Tuple[Path, str]:
+) -> tuple[Path, str]:
     """Return a directory containing .git, .hg, or pyproject.toml.
 
+    pyproject.toml files are only considered if they contain a [tool.black]
+    section and are ignored otherwise.
+
     That directory will be a common parent of all files and directories
     passed in `srcs`.
 
@@ -103,7 +96,7 @@ def find_project_root(
 
 
 def find_pyproject_toml(
-    path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
+    path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
 ) -> Optional[str]:
     """Find the absolute filepath to a pyproject.toml if it exists"""
     path_project_root, _ = find_project_root(path_search_start, stdin_filename)
@@ -125,13 +118,13 @@ def find_pyproject_toml(
 
 
 @mypyc_attr(patchable=True)
-def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
+def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
     """Parse a pyproject toml file, pulling out relevant parts for Black.
 
     If parsing fails, will raise a tomllib.TOMLDecodeError.
     """
     pyproject_toml = _load_toml(path_config)
-    config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
+    config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
     config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
 
     if "target_version" not in config:
@@ -143,8 +136,8 @@ def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
 
 
 def infer_target_version(
-    pyproject_toml: Dict[str, Any],
-) -> Optional[List[TargetVersion]]:
+    pyproject_toml: dict[str, Any],
+) -> Optional[list[TargetVersion]]:
     """Infer Black's target version from the project metadata in pyproject.toml.
 
     Supports the PyPA standard format (PEP 621):
@@ -167,7 +160,7 @@ def infer_target_version(
     return None
 
 
-def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.
 
     If parsing fails, will raise a packaging.version.InvalidVersion error.
@@ -182,7 +175,7 @@ def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersio
         return None
 
 
-def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.
 
     If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
@@ -193,7 +186,7 @@ def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVers
         return None
 
     target_version_map = {f"3.{v.value}": v for v in TargetVersion}
-    compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
+    compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
     if compatible_versions:
         return [target_version_map[v] for v in compatible_versions]
     return None
@@ -248,7 +241,7 @@ def find_user_pyproject_toml() -> Path:
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
-    lines: List[str] = []
+    lines: list[str] = []
     if gitignore.is_file():
         with gitignore.open(encoding="utf-8") as gf:
             lines = gf.readlines()
@@ -269,8 +262,6 @@ def resolves_outside_root_or_cannot_stat(
     root directory. Also returns True if we failed to resolve the path.
     """
     try:
-        if sys.version_info < (3, 8, 6):
-            path = path.absolute()  # https://bugs.python.org/issue33660
         resolved_path = _cached_resolve(path)
     except OSError as e:
         if report:
@@ -301,7 +292,7 @@ def best_effort_relative_path(path: Path, root: Path) -> Path:
 def _path_is_ignored(
     root_relative_path: str,
     root: Path,
-    gitignore_dict: Dict[Path, PathSpec],
+    gitignore_dict: dict[Path, PathSpec],
 ) -> bool:
     path = root / root_relative_path
     # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
@@ -309,6 +300,8 @@ def _path_is_ignored(
     for gitignore_path, pattern in gitignore_dict.items():
         try:
             relative_path = path.relative_to(gitignore_path).as_posix()
+            if path.is_dir():
+                relative_path = relative_path + "/"
         except ValueError:
             break
         if pattern.match_file(relative_path):
@@ -332,7 +325,7 @@ def gen_python_files(
     extend_exclude: Optional[Pattern[str]],
     force_exclude: Optional[Pattern[str]],
     report: Report,
-    gitignore_dict: Optional[Dict[Path, PathSpec]],
+    gitignore_dict: Optional[dict[Path, PathSpec]],
     *,
     verbose: bool,
     quiet: bool,
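The two lines added to `_path_is_ignored` mark directories with a trailing slash before matching, because `pathspec` only applies directory-only patterns such as `build/` to candidate paths that end in `/`. A quick demonstration (requires the `pathspec` package):

```python
from pathspec import PathSpec

spec = PathSpec.from_lines("gitwildmatch", ["build/"])
print(spec.match_file("build"))   # False: candidate treated as a file
print(spec.match_file("build/"))  # True: trailing slash marks a directory
```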
--- a/src/black/handle_ipynb_magics.py
+++ b/src/black/handle_ipynb_magics.py
@@ -3,17 +3,19 @@
 import ast
 import collections
 import dataclasses
+import re
 import secrets
 import sys
 from functools import lru_cache
 from importlib.util import find_spec
-from typing import Dict, List, Optional, Tuple
+from typing import Optional
 
 if sys.version_info >= (3, 10):
     from typing import TypeGuard
 else:
     from typing_extensions import TypeGuard
 
+from black.mode import Mode
 from black.output import out
 from black.report import NothingChanged
 
@@ -41,7 +43,6 @@
     "time",
     "timeit",
 ))
-TOKEN_HEX = secrets.token_hex
 
 
 @dataclasses.dataclass(frozen=True)
@@ -64,7 +65,35 @@ def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
     return installed
 
 
-def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
+def validate_cell(src: str, mode: Mode) -> None:
+    """Check that cell does not already contain TransformerManager transformations,
+    or non-Python cell magics, which might cause tokenizer_rt to break because of
+    indentations.
+
+    If a cell contains ``!ls``, then it'll be transformed to
+    ``get_ipython().system('ls')``. However, if the cell originally contained
+    ``get_ipython().system('ls')``, then it would get transformed in the same way:
+
+        >>> TransformerManager().transform_cell("get_ipython().system('ls')")
+        "get_ipython().system('ls')\n"
+        >>> TransformerManager().transform_cell("!ls")
+        "get_ipython().system('ls')\n"
+
+    Due to the impossibility of safely roundtripping in such situations, cells
+    containing transformed magics will be ignored.
+    """
+    if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
+        raise NothingChanged
+
+    line = _get_code_start(src)
+    if line.startswith("%%") and (
+        line.split(maxsplit=1)[0][2:]
+        not in PYTHON_CELL_MAGICS | mode.python_cell_magics
+    ):
+        raise NothingChanged
+
+
+def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
     """Remove trailing semicolon from Jupyter notebook cell.
 
     For example,
@@ -120,7 +149,7 @@ def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
     return str(tokens_to_src(tokens))
 
 
-def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
+def mask_cell(src: str) -> tuple[str, list[Replacement]]:
     """Mask IPython magics so content becomes parseable Python code.
 
     For example,
@@ -130,12 +159,12 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
 
     becomes
 
-        "25716f358c32750e"
+        b"25716f358c32750"
         'foo'
 
     The replacements are returned, along with the transformed code.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []
     try:
         ast.parse(src)
     except SyntaxError:
@@ -148,18 +177,32 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
     from IPython.core.inputtransformer2 import TransformerManager
 
     transformer_manager = TransformerManager()
+    # A side effect of the following transformation is that it also removes any
+    # empty lines at the beginning of the cell.
     transformed = transformer_manager.transform_cell(src)
     transformed, cell_magic_replacements = replace_cell_magics(transformed)
     replacements += cell_magic_replacements
     transformed = transformer_manager.transform_cell(transformed)
     transformed, magic_replacements = replace_magics(transformed)
-    if len(transformed.splitlines()) != len(src.splitlines()):
+    if len(transformed.strip().splitlines()) != len(src.strip().splitlines()):
         # Multi-line magic, not supported.
         raise NothingChanged
     replacements += magic_replacements
     return transformed, replacements
 
 
+def create_token(n_chars: int) -> str:
+    """Create a randomly generated token that is n_chars characters long."""
+    assert n_chars > 0
+    n_bytes = max(n_chars // 2 - 1, 1)
+    token = secrets.token_hex(n_bytes)
+    if len(token) + 3 > n_chars:
+        token = token[:-1]
+    # We use a bytestring so that the string does not get interpreted
+    # as a docstring.
+    return f'b"{token}"'
+
+
 def get_token(src: str, magic: str) -> str:
     """Return randomly generated token to mask IPython magic with.
 
@@ -169,11 +212,11 @@ def get_token(src: str, magic: str) -> str:
     not already present anywhere else in the cell.
     """
     assert magic
-    nbytes = max(len(magic) // 2 - 1, 1)
-    token = TOKEN_HEX(nbytes)
+    n_chars = len(magic)
+    token = create_token(n_chars)
     counter = 0
     while token in src:
-        token = TOKEN_HEX(nbytes)
+        token = create_token(n_chars)
         counter += 1
         if counter > 100:
             raise AssertionError(
@@ -181,12 +224,10 @@ def get_token(src: str, magic: str) -> str:
                 "Please report a bug on https://github.com/psf/black/issues. "
                 f"The magic might be helpful: {magic}"
             ) from None
-    if len(token) + 2 < len(magic):
-        token = f"{token}."
-    return f'"{token}"'
+    return token
 
 
-def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace cell magic with token.
 
     Note that 'src' will already have been processed by IPython's
@@ -203,7 +244,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
 
     The replacement, along with the transformed code, is returned.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []
 
     tree = ast.parse(src)
 
@@ -217,7 +258,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
     return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
 
 
-def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace magics within body of cell.
 
     Note that 'src' will already have been processed by IPython's
@@ -239,7 +280,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
     magic_finder = MagicFinder()
     magic_finder.visit(ast.parse(src))
     new_srcs = []
-    for i, line in enumerate(src.splitlines(), start=1):
+    for i, line in enumerate(src.split("\n"), start=1):
         if i in magic_finder.magics:
             offsets_and_magics = magic_finder.magics[i]
             if len(offsets_and_magics) != 1:  # pragma: nocover
@@ -258,7 +299,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
     return "\n".join(new_srcs), replacements
 
 
-def unmask_cell(src: str, replacements: List[Replacement]) -> str:
+def unmask_cell(src: str, replacements: list[Replacement]) -> str:
     """Remove replacements from cell.
 
     For example
@@ -276,6 +317,21 @@ def unmask_cell(src: str, replacements: List[Replacement]) -> str:
     return src
 
 
+def _get_code_start(src: str) -> str:
+    """Provides the first line where the code starts.
+
+    Iterates over lines of code until it finds the first line that doesn't
+    contain only empty spaces and comments. It removes any empty spaces at the
+    start of the line and returns it. If such line doesn't exist, it returns an
+    empty string.
+    """
+    for match in re.finditer(".+", src):
+        line = match.group(0).lstrip()
+        if line and not line.startswith("#"):
+            return line
+    return ""
+
+
 def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
     """Check if attribute is IPython magic.
 
@@ -291,11 +347,11 @@ def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
     )
 
 
-def _get_str_args(args: List[ast.expr]) -> List[str]:
+def _get_str_args(args: list[ast.expr]) -> list[str]:
     str_args = []
     for arg in args:
-        assert isinstance(arg, ast.Str)
-        str_args.append(arg.s)
+        assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
+        str_args.append(arg.value)
     return str_args
 
 
@@ -375,7 +431,7 @@ class MagicFinder(ast.NodeVisitor):
     """
 
     def __init__(self) -> None:
-        self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
+        self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)
 
     def visit_Assign(self, node: ast.Assign) -> None:
         """Look for system assign magics.
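`create_token` keeps the masked token exactly as long as the magic it replaces, so later column offsets in the cell stay valid. A standalone copy of the helper from the hunk above, plus a length check for two typical magics:

```python
import secrets


def create_token(n_chars: int) -> str:
    """Copy of the helper above: a b"..." token of (about) n_chars characters."""
    assert n_chars > 0
    n_bytes = max(n_chars // 2 - 1, 1)
    token = secrets.token_hex(n_bytes)
    if len(token) + 3 > n_chars:
        token = token[:-1]
    return f'b"{token}"'


for magic in ("%time", "%matplotlib inline"):
    assert len(create_token(len(magic))) == len(magic)
```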
--- a/src/black/linegen.py
+++ b/src/black/linegen.py
@@ -4,10 +4,11 @@
 
 import re
 import sys
+from collections.abc import Collection, Iterator
 from dataclasses import replace
 from enum import Enum, auto
 from functools import partial, wraps
-from typing import Collection, Iterator, List, Optional, Set, Union, cast
+from typing import Optional, Union, cast
 
 from black.brackets import (
     COMMA_PRIORITY,
@@ -39,11 +40,13 @@
     ensure_visible,
     fstring_to_string,
     get_annotation_type,
+    has_sibling_with_type,
     is_arith_like,
     is_async_stmt_or_funcdef,
     is_atom_with_invisible_parens,
     is_docstring,
     is_empty_tuple,
+    is_generator,
     is_lpar_token,
     is_multiline_string,
     is_name_token,
@@ -54,6 +57,8 @@
     is_rpar_token,
     is_stub_body,
     is_stub_suite,
+    is_tuple,
+    is_tuple_containing_star,
     is_tuple_containing_walrus,
     is_type_ignore_comment_string,
     is_vararg,
@@ -64,7 +69,7 @@
 )
 from black.numerics import normalize_numeric_literal
 from black.strings import (
-    fix_docstring,
+    fix_multiline_docstring,
     get_string_prefix,
     normalize_string_prefix,
     normalize_string_quotes,
@@ -197,7 +202,7 @@ def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
         yield from self.line(-1)
 
     def visit_stmt(
-        self, node: Node, keywords: Set[str], parens: Set[str]
+        self, node: Node, keywords: set[str], parens: set[str]
     ) -> Iterator[Line]:
         """Visit a statement.
 
@@ -411,10 +416,9 @@ def foo(a: (int), b: (float) = 7): ...
         yield from self.visit_default(node)
 
     def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
-        if Preview.hex_codes_in_unicode_sequences in self.mode:
-            normalize_unicode_escape_sequences(leaf)
+        normalize_unicode_escape_sequences(leaf)
 
-        if is_docstring(leaf, self.mode) and not re.search(r"\\\s*\n", leaf.value):
+        if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value):
             # We're ignoring docstrings with backslash newline escapes because changing
             # indentation of those changes the AST representation of the code.
             if self.mode.string_normalization:
@@ -441,7 +445,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
             indent = " " * 4 * self.current_line.depth
 
             if is_multiline_string(leaf):
-                docstring = fix_docstring(docstring, indent)
+                docstring = fix_multiline_docstring(docstring, indent)
             else:
                 docstring = docstring.strip()
 
@@ -485,10 +489,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
                 and len(indent) + quote_len <= self.mode.line_length
                 and not has_trailing_backslash
             ):
-                if (
-                    Preview.docstring_check_for_newline in self.mode
-                    and leaf.value[-1 - quote_len] == "\n"
-                ):
+                if leaf.value[-1 - quote_len] == "\n":
                     leaf.value = prefix + quote + docstring + quote
                 else:
                     leaf.value = prefix + quote + docstring + "\n" + indent + quote
@@ -506,10 +507,32 @@ def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]:
         normalize_numeric_literal(leaf)
         yield from self.visit_default(leaf)
 
+    def visit_atom(self, node: Node) -> Iterator[Line]:
+        """Visit any atom"""
+        if len(node.children) == 3:
+            first = node.children[0]
+            last = node.children[-1]
+            if (first.type == token.LSQB and last.type == token.RSQB) or (
+                first.type == token.LBRACE and last.type == token.RBRACE
+            ):
+                # Lists or sets of one item
+                maybe_make_parens_invisible_in_atom(node.children[1], parent=node)
+
+        yield from self.visit_default(node)
+
     def visit_fstring(self, node: Node) -> Iterator[Line]:
         # currently we don't want to format and split f-strings at all.
         string_leaf = fstring_to_string(node)
         node.replace(string_leaf)
+        if "\\" in string_leaf.value and any(
+            "\\" in str(child)
+            for child in node.children
+            if child.type == syms.fstring_replacement_field
+        ):
+            # string normalization doesn't account for nested quotes,
+            # causing breakages. skip normalization when nested quotes exist
+            yield from self.visit_default(string_leaf)
+            return
         yield from self.visit_STRING(string_leaf)
 
         # TODO: Uncomment Implementation to format f-string children
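The new `visit_atom` makes redundant parentheses around the sole element of a list or set display invisible, so an input like the first line below may be rewritten to the second. The exact trigger conditions follow the hunk above; this snippet only shows that the two spellings are semantically identical:

```python
x = [(1 + 2)]  # redundant parens around the sole list item
y = [1 + 2]    # what Black can now produce
assert x == y == [3]

s = {("a" + "b")}  # same idea for a one-element set display
assert s == {"ab"}
```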
@ -550,7 +573,7 @@ def __post_init__(self) -> None:
|
|||||||
self.current_line = Line(mode=self.mode)
|
self.current_line = Line(mode=self.mode)
|
||||||
|
|
||||||
v = self.visit_stmt
|
v = self.visit_stmt
|
||||||
Ø: Set[str] = set()
|
Ø: set[str] = set()
|
||||||
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
|
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
|
||||||
self.visit_if_stmt = partial(
|
self.visit_if_stmt = partial(
|
||||||
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
|
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
|
||||||
@ -574,7 +597,6 @@ def __post_init__(self) -> None:
|
|||||||
# PEP 634
|
# PEP 634
|
||||||
self.visit_match_stmt = self.visit_match_case
|
self.visit_match_stmt = self.visit_match_case
|
||||||
self.visit_case_block = self.visit_match_case
|
self.visit_case_block = self.visit_match_case
|
||||||
if Preview.remove_redundant_guard_parens in self.mode:
|
|
||||||
self.visit_guard = partial(v, keywords=Ø, parens={"if"})
|
self.visit_guard = partial(v, keywords=Ø, parens={"if"})
|
||||||
|
|
||||||
|
|
||||||
@ -617,7 +639,7 @@ def transform_line(
|
|||||||
string_split = StringSplitter(ll, sn)
|
string_split = StringSplitter(ll, sn)
|
||||||
string_paren_wrap = StringParenWrapper(ll, sn)
|
string_paren_wrap = StringParenWrapper(ll, sn)
|
||||||
|
|
||||||
transformers: List[Transformer]
|
transformers: list[Transformer]
|
||||||
if (
|
if (
|
||||||
not line.contains_uncollapsable_type_comments()
|
not line.contains_uncollapsable_type_comments()
|
||||||
and not line.should_split_rhs
|
and not line.should_split_rhs
|
||||||
@ -717,7 +739,7 @@ def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
|
|||||||
"""If a funcdef has a magic trailing comma in the return type, then we should first
|
"""If a funcdef has a magic trailing comma in the return type, then we should first
|
||||||
split the line with rhs to respect the comma.
|
split the line with rhs to respect the comma.
|
||||||
"""
|
"""
|
||||||
return_type_leaves: List[Leaf] = []
|
return_type_leaves: list[Leaf] = []
|
||||||
in_return_type = False
|
in_return_type = False
|
||||||
|
|
||||||
for leaf in line.leaves:
|
for leaf in line.leaves:
|
||||||
@ -759,9 +781,10 @@ def left_hand_split(
|
|||||||
Prefer RHS otherwise. This is why this function is not symmetrical with
|
Prefer RHS otherwise. This is why this function is not symmetrical with
|
||||||
:func:`right_hand_split` which also handles optional parentheses.
|
:func:`right_hand_split` which also handles optional parentheses.
|
||||||
"""
|
"""
|
||||||
tail_leaves: List[Leaf] = []
|
for leaf_type in [token.LPAR, token.LSQB]:
|
||||||
body_leaves: List[Leaf] = []
|
tail_leaves: list[Leaf] = []
|
||||||
head_leaves: List[Leaf] = []
|
body_leaves: list[Leaf] = []
|
||||||
|
head_leaves: list[Leaf] = []
|
||||||
current_leaves = head_leaves
|
current_leaves = head_leaves
|
||||||
matching_bracket: Optional[Leaf] = None
|
matching_bracket: Optional[Leaf] = None
|
||||||
for leaf in line.leaves:
|
for leaf in line.leaves:
|
||||||
@@ -776,9 +799,11 @@ def left_hand_split(
         current_leaves = tail_leaves if body_leaves else head_leaves
         current_leaves.append(leaf)
         if current_leaves is head_leaves:
-            if leaf.type in OPENING_BRACKETS:
+            if leaf.type == leaf_type:
                 matching_bracket = leaf
                 current_leaves = body_leaves
+        if matching_bracket and tail_leaves:
+            break
     if not matching_bracket or not tail_leaves:
         raise CannotSplit("No brackets found")

@@ -827,9 +852,9 @@ def _first_right_hand_split(
    _maybe_split_omitting_optional_parens to get an opinion whether to prefer
    splitting on the right side of an assignment statement.
    """
-    tail_leaves: List[Leaf] = []
-    body_leaves: List[Leaf] = []
-    head_leaves: List[Leaf] = []
+    tail_leaves: list[Leaf] = []
+    body_leaves: list[Leaf] = []
+    head_leaves: list[Leaf] = []
     current_leaves = tail_leaves
     opening_bracket: Optional[Leaf] = None
     closing_bracket: Optional[Leaf] = None

@@ -860,8 +885,8 @@ def _first_right_hand_split(
         and tail_leaves[0].opening_bracket is head_leaves[-1]
     ):
         inner_body_leaves = list(body_leaves)
-        hugged_opening_leaves: List[Leaf] = []
-        hugged_closing_leaves: List[Leaf] = []
+        hugged_opening_leaves: list[Leaf] = []
+        hugged_closing_leaves: list[Leaf] = []
         is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
         unpacking_offset: int = 1 if is_unpacking else 0
         while (

@@ -945,29 +970,7 @@ def _maybe_split_omitting_optional_parens(
         try:
             # The RHSResult Omitting Optional Parens.
             rhs_oop = _first_right_hand_split(line, omit=omit)
-            is_split_right_after_equal = (
-                len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL
-            )
-            rhs_head_contains_brackets = any(
-                leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]
-            )
-            # the -1 is for the ending optional paren
-            rhs_head_short_enough = is_line_short_enough(
-                rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
-            )
-            rhs_head_explode_blocked_by_magic_trailing_comma = (
-                rhs.head.magic_trailing_comma is None
-            )
-            if (
-                not (
-                    is_split_right_after_equal
-                    and rhs_head_contains_brackets
-                    and rhs_head_short_enough
-                    and rhs_head_explode_blocked_by_magic_trailing_comma
-                )
-                # the omit optional parens split is preferred by some other reason
-                or _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode)
-            ):
+            if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode):
                 yield from _maybe_split_omitting_optional_parens(
                     rhs_oop, line, mode, features=features, omit=omit
                 )

@@ -978,8 +981,15 @@ def _maybe_split_omitting_optional_parens(
            if line.is_chained_assignment:
                pass

-            elif not can_be_split(rhs.body) and not is_line_short_enough(
-                rhs.body, mode=mode
+            elif (
+                not can_be_split(rhs.body)
+                and not is_line_short_enough(rhs.body, mode=mode)
+                and not (
+                    Preview.wrap_long_dict_values_in_parens
+                    and rhs.opening_bracket.parent
+                    and rhs.opening_bracket.parent.parent
+                    and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
+                )
            ):
                raise CannotSplit(
                    "Splitting failed, body is still too long and can't be split."

@@ -1010,6 +1020,44 @@ def _prefer_split_rhs_oop_over_rhs(
    Returns whether we should prefer the result from a split omitting optional parens
    (rhs_oop) over the original (rhs).
    """
+    # contains unsplittable type ignore
+    if (
+        rhs_oop.head.contains_unsplittable_type_ignore()
+        or rhs_oop.body.contains_unsplittable_type_ignore()
+        or rhs_oop.tail.contains_unsplittable_type_ignore()
+    ):
+        return True
+
+    # Retain optional parens around dictionary values
+    if (
+        Preview.wrap_long_dict_values_in_parens
+        and rhs.opening_bracket.parent
+        and rhs.opening_bracket.parent.parent
+        and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
+        and rhs.body.bracket_tracker.delimiters
+    ):
+        # Unless the split is inside the key
+        return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves)
+
+    # the split is right after `=`
+    if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL):
+        return True
+
+    # the left side of assignment contains brackets
+    if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]):
+        return True
+
+    # the left side of assignment is short enough (the -1 is for the ending optional
+    # paren)
+    if not is_line_short_enough(
+        rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
+    ):
+        return True
+
+    # the left side of assignment won't explode further because of magic trailing comma
+    if rhs.head.magic_trailing_comma is not None:
+        return True
+
    # If we have multiple targets, we prefer more `=`s on the head vs pushing them to
    # the body
    rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL)
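The rewritten _prefer_split_rhs_oop_over_rhs front-loads each documented reason as an early return, which is why the call site above collapses to a single if. The refactoring pattern in isolation (hypothetical predicate and parameter names, not black's API):

    def prefer_alternative(split_after_equal: bool, head_has_brackets: bool,
                           head_is_short: bool) -> bool:
        # One early return per commented reason, instead of one giant boolean.
        if not split_after_equal:
            return True
        if not head_has_brackets:
            return True
        if not head_is_short:
            return True
        return False

    assert prefer_alternative(False, True, True)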
@@ -1037,10 +1085,6 @@ def _prefer_split_rhs_oop_over_rhs(
            # the first line is short enough
            and is_line_short_enough(rhs_oop.head, mode=mode)
        )
-        # contains unsplittable type ignore
-        or rhs_oop.head.contains_unsplittable_type_ignore()
-        or rhs_oop.body.contains_unsplittable_type_ignore()
-        or rhs_oop.tail.contains_unsplittable_type_ignore()
    )


@@ -1070,8 +1114,44 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None
    )


+def _ensure_trailing_comma(
+    leaves: list[Leaf], original: Line, opening_bracket: Leaf
+) -> bool:
+    if not leaves:
+        return False
+    # Ensure a trailing comma for imports
+    if original.is_import:
+        return True
+    # ...and standalone function arguments
+    if not original.is_def:
+        return False
+    if opening_bracket.value != "(":
+        return False
+    # Don't add commas if we already have any commas
+    if any(
+        leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves
+    ):
+        return False
+
+    # Find a leaf with a parent (comments don't have parents)
+    leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None)
+    if leaf_with_parent is None:
+        return True
+    # Don't add commas inside parenthesized return annotations
+    if get_annotation_type(leaf_with_parent) == "return":
+        return False
+    # Don't add commas inside PEP 604 unions
+    if (
+        leaf_with_parent.parent
+        and leaf_with_parent.parent.next_sibling
+        and leaf_with_parent.parent.next_sibling.type == token.VBAR
+    ):
+        return False
+    return True
+
+
 def bracket_split_build_line(
-    leaves: List[Leaf],
+    leaves: list[Leaf],
     original: Line,
     opening_bracket: Leaf,
     *,
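Worth noting in _ensure_trailing_comma above: the "first leaf that has a parent, else None" lookup uses next() with a default. The idiom standalone:

    items = [0, "", None, "x", "y"]
    first_truthy = next((item for item in items if item), None)
    assert first_truthy == "x"  # next() returns None when nothing matches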
@@ -1090,32 +1170,7 @@ def bracket_split_build_line(
    if component is _BracketSplitComponent.body:
        result.inside_brackets = True
        result.depth += 1
-        if leaves:
-            no_commas = (
-                # Ensure a trailing comma for imports and standalone function arguments
-                original.is_def
-                # Don't add one after any comments or within type annotations
-                and opening_bracket.value == "("
-                # Don't add one if there's already one there
-                and not any(
-                    leaf.type == token.COMMA
-                    and (
-                        Preview.typed_params_trailing_comma not in original.mode
-                        or not is_part_of_annotation(leaf)
-                    )
-                    for leaf in leaves
-                )
-                # Don't add one inside parenthesized return annotations
-                and get_annotation_type(leaves[0]) != "return"
-                # Don't add one inside PEP 604 unions
-                and not (
-                    leaves[0].parent
-                    and leaves[0].parent.next_sibling
-                    and leaves[0].parent.next_sibling.type == token.VBAR
-                )
-            )
-
-            if original.is_import or no_commas:
+        if _ensure_trailing_comma(leaves, original, opening_bracket):
            for i in range(len(leaves) - 1, -1, -1):
                if leaves[i].type == STANDALONE_COMMENT:
                    continue

@@ -1125,7 +1180,7 @@ def bracket_split_build_line(
                leaves.insert(i + 1, new_comma)
                break

-    leaves_to_track: Set[LeafID] = set()
+    leaves_to_track: set[LeafID] = set()
    if component is _BracketSplitComponent.head:
        leaves_to_track = get_leaves_inside_matching_brackets(leaves)
    # Populate the line

@@ -1317,7 +1372,7 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]:


 def normalize_invisible_parens(  # noqa: C901
-    node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature]
+    node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
 ) -> None:
    """Make existing optional parentheses invisible or create new ones.

@@ -1355,11 +1410,7 @@ def normalize_invisible_parens(  # noqa: C901
            )

        # Add parentheses around if guards in case blocks
-        if (
-            isinstance(child, Node)
-            and child.type == syms.guard
-            and Preview.parens_for_long_if_clauses_in_case_block in mode
-        ):
+        if isinstance(child, Node) and child.type == syms.guard:
            normalize_invisible_parens(
                child, parens_after={"if"}, mode=mode, features=features
            )

@@ -1577,6 +1628,12 @@ def maybe_make_parens_invisible_in_atom(
        node.type not in (syms.atom, syms.expr)
        or is_empty_tuple(node)
        or is_one_tuple(node)
+        or (is_tuple(node) and parent.type == syms.asexpr_test)
+        or (
+            is_tuple(node)
+            and parent.type == syms.with_stmt
+            and has_sibling_with_type(node, token.COMMA)
+        )
        or (is_yield(node) and parent.type != syms.expr_stmt)
        or (
            # This condition tries to prevent removing non-optional brackets

@@ -1586,6 +1643,8 @@ def maybe_make_parens_invisible_in_atom(
            and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
        )
        or is_tuple_containing_walrus(node)
+        or is_tuple_containing_star(node)
+        or is_generator(node)
    ):
        return False

@@ -1598,6 +1657,7 @@ def maybe_make_parens_invisible_in_atom(
        syms.except_clause,
        syms.funcdef,
        syms.with_stmt,
+        syms.testlist_gexp,
        syms.tname,
        # these ones aren't useful to end users, but they do please fuzzers
        syms.for_stmt,

@@ -1617,9 +1677,6 @@ def maybe_make_parens_invisible_in_atom(
            not is_type_ignore_comment_string(middle.prefix.strip())
        ):
            first.value = ""
-            if first.prefix.strip():
-                # Preserve comments before first paren
-                middle.prefix = first.prefix + middle.prefix
            last.value = ""
            maybe_make_parens_invisible_in_atom(
                middle,

@@ -1631,6 +1688,13 @@ def maybe_make_parens_invisible_in_atom(
            # Strip the invisible parens from `middle` by replacing
            # it with the child in-between the invisible parens
            middle.replace(middle.children[1])
+
+            if middle.children[0].prefix.strip():
+                # Preserve comments before first paren
+                middle.children[1].prefix = (
+                    middle.children[0].prefix + middle.children[1].prefix
+                )
+
            if middle.children[-1].prefix.strip():
                # Preserve comments before last paren
                last.prefix = middle.children[-1].prefix + last.prefix

@@ -1667,7 +1731,7 @@ def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
    )


-def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
+def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
    """Generate sets of closing bracket IDs that should be omitted in a RHS.

    Brackets can be omitted if the entire trailer up to and including

@@ -1678,14 +1742,14 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf
    the one that needs to explode are omitted.
    """

-    omit: Set[LeafID] = set()
+    omit: set[LeafID] = set()
    if not line.magic_trailing_comma:
        yield omit

    length = 4 * line.depth
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
-    inner_brackets: Set[LeafID] = set()
+    inner_brackets: set[LeafID] = set()
    for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
        length += leaf_length
        if length > line_length:

@@ -1750,10 +1814,10 @@ def run_transformer(
    features: Collection[Feature],
    *,
    line_str: str = "",
-) -> List[Line]:
+) -> list[Line]:
    if not line_str:
        line_str = line_to_string(line)
-    result: List[Line] = []
+    result: list[Line] = []
    for transformed_line in transform(line, features, mode):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")
@@ -1,18 +1,8 @@
 import itertools
 import math
+from collections.abc import Callable, Iterator, Sequence
 from dataclasses import dataclass, field
-from typing import (
-    Callable,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Optional, TypeVar, Union, cast

 from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
 from black.mode import Mode, Preview
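This import reshuffle reflects that, since Python 3.9, the container ABCs in typing (Callable, Iterator, Sequence, ...) are deprecated aliases of collections.abc, which can be used directly in annotations. Illustrative sketch:

    from collections.abc import Callable, Iterator, Sequence

    def apply_each(fns: Sequence[Callable[[int], int]], x: int) -> Iterator[int]:
        # ABCs from collections.abc subscript directly on Python 3.9+.
        for fn in fns:
            yield fn(x)

    assert list(apply_each([abs, lambda v: v * 2], -3)) == [3, -6]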
@@ -52,9 +42,9 @@ class Line:

    mode: Mode = field(repr=False)
    depth: int = 0
-    leaves: List[Leaf] = field(default_factory=list)
+    leaves: list[Leaf] = field(default_factory=list)
    # keys ordered like `leaves`
-    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
+    comments: dict[LeafID, list[Leaf]] = field(default_factory=dict)
    bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
    inside_brackets: bool = False
    should_split_rhs: bool = False

@@ -214,9 +204,7 @@ def _is_triple_quoted_string(self) -> bool:
    @property
    def is_docstring(self) -> bool:
        """Is the line a docstring?"""
-        if Preview.unify_docstring_detection not in self.mode:
-            return self._is_triple_quoted_string
-        return bool(self) and is_docstring(self.leaves[0], self.mode)
+        return bool(self) and is_docstring(self.leaves[0])

    @property
    def is_chained_assignment(self) -> bool:

@@ -426,7 +414,7 @@ def append_comment(self, comment: Leaf) -> bool:
        self.comments.setdefault(id(last_leaf), []).append(comment)
        return True

-    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+    def comments_after(self, leaf: Leaf) -> list[Leaf]:
        """Generate comments that should appear directly after `leaf`."""
        return self.comments.get(id(leaf), [])

@@ -459,13 +447,13 @@ def is_complex_subscript(self, leaf: Leaf) -> bool:

    def enumerate_with_length(
        self, is_reversed: bool = False
-    ) -> Iterator[Tuple[Index, Leaf, int]]:
+    ) -> Iterator[tuple[Index, Leaf, int]]:
        """Return an enumeration of leaves with their length.

        Stops prematurely on multiline strings and standalone comments.
        """
        op = cast(
-            Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+            Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
            enumerate_reversed if is_reversed else enumerate,
        )
        for index, leaf in op(self.leaves):

@@ -531,11 +519,11 @@ class LinesBlock:
    previous_block: Optional["LinesBlock"]
    original_line: Line
    before: int = 0
-    content_lines: List[str] = field(default_factory=list)
+    content_lines: list[str] = field(default_factory=list)
    after: int = 0
    form_feed: bool = False

-    def all_lines(self) -> List[str]:
+    def all_lines(self) -> list[str]:
        empty_line = str(Line(mode=self.mode))
        prefix = make_simple_prefix(self.before, self.form_feed, empty_line)
        return [prefix] + self.content_lines + [empty_line * self.after]

@@ -554,7 +542,7 @@ class EmptyLineTracker:
    mode: Mode
    previous_line: Optional[Line] = None
    previous_block: Optional[LinesBlock] = None
-    previous_defs: List[Line] = field(default_factory=list)
+    previous_defs: list[Line] = field(default_factory=list)
    semantic_leading_comment: Optional[LinesBlock] = None

    def maybe_empty_lines(self, current_line: Line) -> LinesBlock:

@@ -607,7 +595,7 @@ def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
        self.previous_block = block
        return block

-    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C901
+    def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]:  # noqa: C901
        max_allowed = 1
        if current_line.depth == 0:
            max_allowed = 1 if self.mode.is_pyi else 2

@@ -681,6 +669,15 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C9
                current_line, before, user_had_newline
            )

+        if (
+            self.previous_line.is_import
+            and self.previous_line.depth == 0
+            and current_line.depth == 0
+            and not current_line.is_import
+            and Preview.always_one_newline_after_import in self.mode
+        ):
+            return 1, 0
+
        if (
            self.previous_line.is_import
            and not current_line.is_import
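The new branch fires only for top-level code that directly follows a top-level import while the always_one_newline_after_import preview feature is on. A toy re-implementation of just this guard (hypothetical helper, not black's internals):

    from typing import Optional

    def newlines_after_import(prev_is_import: bool, prev_depth: int,
                              cur_is_import: bool, cur_depth: int,
                              feature_on: bool) -> Optional[int]:
        if (prev_is_import and prev_depth == 0 and cur_depth == 0
                and not cur_is_import and feature_on):
            return 1  # force exactly one blank line after the import block
        return None  # otherwise defer to the remaining rules

    assert newlines_after_import(True, 0, False, 0, True) == 1
    assert newlines_after_import(True, 0, True, 0, True) is None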
@@ -693,7 +690,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:  # noqa: C9

    def _maybe_empty_lines_for_class_or_def(  # noqa: C901
        self, current_line: Line, before: int, user_had_newline: bool
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
        assert self.previous_line is not None

        if self.previous_line.is_decorator:

@@ -772,7 +769,7 @@ def _maybe_empty_lines_for_class_or_def(  # noqa: C901
        return newlines, 0


-def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
+def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
    """Like `reversed(enumerate(sequence))` if that were possible."""
    index = len(sequence) - 1
    for element in reversed(sequence):

@@ -781,7 +778,7 @@ def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:


 def append_leaves(
-    new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
+    new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
 ) -> None:
    """
    Append leaves (taken from @old_line) to @new_line, making sure to fix the

@@ -838,10 +835,10 @@ def is_line_short_enough(  # noqa: C901
    # Depth (which is based on the existing bracket_depth concept)
    # is needed to determine nesting level of the MLS.
    # Includes special case for trailing commas.
-    commas: List[int] = []  # tracks number of commas per depth level
+    commas: list[int] = []  # tracks number of commas per depth level
    multiline_string: Optional[Leaf] = None
    # store the leaves that contain parts of the MLS
-    multiline_string_contexts: List[LN] = []
+    multiline_string_contexts: list[LN] = []

    max_level_to_update: Union[int, float] = math.inf  # track the depth of the MLS
    for i, leaf in enumerate(line.leaves):

@@ -865,7 +862,7 @@ def is_line_short_enough(  # noqa: C901
        if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
            # Inside brackets, ignore trailing comma
            # directly after MLS/MLS-containing expression
-            ignore_ctxs: List[Optional[LN]] = [None]
+            ignore_ctxs: list[Optional[LN]] = [None]
            ignore_ctxs += multiline_string_contexts
            if (line.inside_brackets or leaf.bracket_depth > 0) and (
                i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs
@@ -8,7 +8,7 @@
 from enum import Enum, auto
 from hashlib import sha256
 from operator import attrgetter
-from typing import Dict, Final, Set
+from typing import Final

 from black.const import DEFAULT_LINE_LENGTH

@@ -26,6 +26,10 @@ class TargetVersion(Enum):
    PY312 = 12
    PY313 = 13

+    def pretty(self) -> str:
+        assert self.name[:2] == "PY"
+        return f"Python {self.name[2]}.{self.name[3:]}"
+

 class Feature(Enum):
    F_STRINGS = 2
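TargetVersion.pretty() only reformats the enum member's name; the slicing works because every member is named PY followed by a one-digit major version. Recreated standalone:

    name = "PY313"  # TargetVersion.PY313.name
    assert name[:2] == "PY"
    assert f"Python {name[2]}.{name[3:]}" == "Python 3.13"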
@@ -60,7 +64,7 @@ class Feature(Enum):
 }


-VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
+VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
    TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},

@@ -185,34 +189,26 @@ class Feature(Enum):
 }


-def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
+def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)


 class Preview(Enum):
    """Individual preview style features."""

-    hex_codes_in_unicode_sequences = auto()
    # NOTE: string_processing requires wrap_long_dict_values_in_parens
    # for https://github.com/psf/black/issues/3117 to be fixed.
    string_processing = auto()
    hug_parens_with_braces_and_square_brackets = auto()
-    unify_docstring_detection = auto()
-    no_normalize_fmt_skip_whitespace = auto()
    wrap_long_dict_values_in_parens = auto()
    multiline_string_handling = auto()
-    typed_params_trailing_comma = auto()
-    is_simple_lookup_for_doublestar_expression = auto()
-    docstring_check_for_newline = auto()
-    remove_redundant_guard_parens = auto()
-    parens_for_long_if_clauses_in_case_block = auto()
+    always_one_newline_after_import = auto()
+    fix_fmt_skip_in_one_liners = auto()


-UNSTABLE_FEATURES: Set[Preview] = {
+UNSTABLE_FEATURES: set[Preview] = {
    # Many issues, see summary in https://github.com/psf/black/issues/4042
    Preview.string_processing,
-    # See issues #3452 and #4158
-    Preview.wrap_long_dict_values_in_parens,
    # See issue #4159
    Preview.multiline_string_handling,
    # See issue #4036 (crash), #4098, #4099 (proposed tweaks)

@@ -229,17 +225,17 @@ class Deprecated(UserWarning):

 @dataclass
 class Mode:
-    target_versions: Set[TargetVersion] = field(default_factory=set)
+    target_versions: set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    is_pyi: bool = False
    is_ipynb: bool = False
    skip_source_first_line: bool = False
    magic_trailing_comma: bool = True
-    python_cell_magics: Set[str] = field(default_factory=set)
+    python_cell_magics: set[str] = field(default_factory=set)
    preview: bool = False
    unstable: bool = False
-    enabled_features: Set[Preview] = field(default_factory=set)
+    enabled_features: set[Preview] = field(default_factory=set)

    def __contains__(self, feature: Preview) -> bool:
        """

@@ -285,6 +281,7 @@ def get_cache_key(self) -> str:
            str(int(self.skip_source_first_line)),
            str(int(self.magic_trailing_comma)),
            str(int(self.preview)),
+            str(int(self.unstable)),
            features_and_magics,
        ]
        return ".".join(parts)
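Appending str(int(self.unstable)) to the cache key matters because any flag that can change formatting output must also invalidate black's cache. The idea in miniature (toy key builder, not black's actual get_cache_key):

    def cache_key(line_length: int, preview: bool, unstable: bool) -> str:
        return ".".join([str(line_length), str(int(preview)), str(int(unstable))])

    assert cache_key(88, True, False) != cache_key(88, True, True)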
@@ -3,18 +3,8 @@
 """

 import sys
-from typing import (
-    Final,
-    Generic,
-    Iterator,
-    List,
-    Literal,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from collections.abc import Iterator
+from typing import Final, Generic, Literal, Optional, TypeVar, Union

 if sys.version_info >= (3, 10):
     from typing import TypeGuard

@@ -24,7 +14,7 @@
 from mypy_extensions import mypyc_attr

 from black.cache import CACHE_DIR
-from black.mode import Mode, Preview
+from black.mode import Mode
 from black.strings import get_string_prefix, has_triple_quotes
 from blib2to3 import pygram
 from blib2to3.pgen2 import token

@@ -254,9 +244,9 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:  # no
        elif (
            prevp.type == token.STAR
            and parent_type(prevp) == syms.star_expr
-            and parent_type(prevp.parent) == syms.subscriptlist
+            and parent_type(prevp.parent) in (syms.subscriptlist, syms.tname_star)
        ):
-            # No space between typevar tuples.
+            # No space between typevar tuples or unpacking them.
            return NO

        elif prevp.type in VARARGS_SPECIALS:
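The added syms.tname_star case appears to cover PEP 646 star-unpacked annotations in parameter lists (Python 3.11+ syntax), where no space should follow the star. Illustrative:

    # Requires Python 3.11+ to parse.
    def take(*args: *tuple[int, ...]) -> None:
        print(sum(args))

    take(1, 2, 3)  # prints 6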
@@ -456,7 +446,7 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
    return None


-def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
+def prev_siblings_are(node: Optional[LN], tokens: list[Optional[NodeType]]) -> bool:
    """Return if the `node` and its previous siblings match types against the provided
    list of tokens; the provided `node`has its type matched against the last element in
    the list. `None` can be used as the first element to declare that the start of the

@@ -555,7 +545,7 @@ def is_arith_like(node: LN) -> bool:
 }


-def is_docstring(node: NL, mode: Mode) -> bool:
+def is_docstring(node: NL) -> bool:
    if isinstance(node, Leaf):
        if node.type != token.STRING:
            return False

@@ -565,8 +555,7 @@ def is_docstring(node: NL, mode: Mode) -> bool:
        return False

    if (
-        Preview.unify_docstring_detection in mode
-        and node.parent
+        node.parent
        and node.parent.type == syms.simple_stmt
        and not node.parent.prev_sibling
        and node.parent.parent

@@ -614,6 +603,17 @@ def is_one_tuple(node: LN) -> bool:
    )


+def is_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple."""
+    if node.type != syms.atom:
+        return False
+    gexp = unwrap_singleton_parenthesis(node)
+    if gexp is None or gexp.type != syms.testlist_gexp:
+        return False
+
+    return True
+
+
 def is_tuple_containing_walrus(node: LN) -> bool:
    """Return True if `node` holds a tuple that contains a walrus operator."""
    if node.type != syms.atom:

@@ -625,11 +625,33 @@ def is_tuple_containing_walrus(node: LN) -> bool:
    return any(child.type == syms.namedexpr_test for child in gexp.children)


+def is_tuple_containing_star(node: LN) -> bool:
+    """Return True if `node` holds a tuple that contains a star operator."""
+    if node.type != syms.atom:
+        return False
+    gexp = unwrap_singleton_parenthesis(node)
+    if gexp is None or gexp.type != syms.testlist_gexp:
+        return False
+
+    return any(child.type == syms.star_expr for child in gexp.children)
+
+
+def is_generator(node: LN) -> bool:
+    """Return True if `node` holds a generator."""
+    if node.type != syms.atom:
+        return False
+    gexp = unwrap_singleton_parenthesis(node)
+    if gexp is None or gexp.type != syms.testlist_gexp:
+        return False
+
+    return any(child.type == syms.old_comp_for for child in gexp.children)
+
+
 def is_one_sequence_between(
    opening: Leaf,
    closing: Leaf,
-    leaves: List[Leaf],
-    brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
+    leaves: list[Leaf],
+    brackets: tuple[int, int] = (token.LPAR, token.RPAR),
 ) -> bool:
    """Return True if content between `opening` and `closing` is a one-sequence."""
    if (opening.type, closing.type) != brackets:
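The new predicates classify parenthesized atoms whose parentheses must now be preserved. The source forms each is meant to recognize, illustratively:

    a = [1]
    t = (1, 2)          # plain tuple             -> is_tuple
    s = (*a, 2)         # tuple with star unpack  -> is_tuple_containing_star
    g = (x for x in a)  # generator expression    -> is_generator
    assert t == (1, 2) and s == (1, 2) and list(g) == [1]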
@@ -739,7 +761,7 @@ def is_yield(node: LN) -> bool:
    return False


-def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
+def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
    """Return True if `leaf` is a star or double star in a vararg or kwarg.

    If `within` includes VARARGS_PARENTS, this applies to function signatures.

@@ -1006,6 +1028,7 @@ def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]:

 def is_part_of_annotation(leaf: Leaf) -> bool:
    """Returns whether this leaf is part of a type annotation."""
+    assert leaf.parent is not None
    return get_annotation_type(leaf) is not None


@@ -1035,3 +1058,21 @@ def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN:
    while node.parent and node.parent.children and node is node.parent.children[-1]:
        node = node.parent
    return node
+
+
+def has_sibling_with_type(node: LN, type: int) -> bool:
+    # Check previous siblings
+    sibling = node.prev_sibling
+    while sibling is not None:
+        if sibling.type == type:
+            return True
+        sibling = sibling.prev_sibling
+
+    # Check next siblings
+    sibling = node.next_sibling
+    while sibling is not None:
+        if sibling.type == type:
+            return True
+        sibling = sibling.next_sibling
+
+    return False
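has_sibling_with_type scans outward from a node in both directions. The same two-loop shape with a toy node class (the real code walks blib2to3 tree nodes):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Toy:
        type: int
        prev_sibling: Optional["Toy"] = None
        next_sibling: Optional["Toy"] = None

    a, b, c = Toy(1), Toy(2), Toy(3)
    a.next_sibling, b.prev_sibling = b, a
    b.next_sibling, c.prev_sibling = c, b

    def has_sibling_with_type(node: Toy, type: int) -> bool:
        # Walk the prev chain, then the next chain, stopping on a match.
        for direction in ("prev_sibling", "next_sibling"):
            sibling = getattr(node, direction)
            while sibling is not None:
                if sibling.type == type:
                    return True
                sibling = getattr(sibling, direction)
        return False

    assert has_sibling_with_type(b, 3) and not has_sibling_with_type(b, 2)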
@@ -6,7 +6,7 @@
 import json
 import re
 import tempfile
-from typing import Any, List, Optional
+from typing import Any, Optional

 from click import echo, style
 from mypy_extensions import mypyc_attr

@@ -59,7 +59,7 @@ def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
 _line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")


-def _splitlines_no_ff(source: str) -> List[str]:
+def _splitlines_no_ff(source: str) -> list[str]:
    """Split a string into lines ignoring form feed and other chars.

    This mimics how the Python parser splits source code.

@@ -5,7 +5,7 @@
 import ast
 import sys
 import warnings
-from typing import Iterable, Iterator, List, Set, Tuple
+from collections.abc import Collection, Iterator

 from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
 from black.nodes import syms

@@ -21,7 +21,7 @@ class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts."""


-def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
+def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [

@@ -52,12 +52,20 @@ def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
    return grammars


-def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
+def lib2to3_parse(
+    src_txt: str, target_versions: Collection[TargetVersion] = ()
+) -> Node:
    """Given a string with source, return the lib2to3 Node."""
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    grammars = get_grammars(set(target_versions))
+    if target_versions:
+        max_tv = max(target_versions, key=lambda tv: tv.value)
+        tv_str = f" for target version {max_tv.pretty()}"
+    else:
+        tv_str = ""
+
    errors = {}
    for grammar in grammars:
        drv = driver.Driver(grammar)

@@ -73,14 +81,14 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
            except IndexError:
                faulty_line = "<line number missing in source>"
            errors[grammar.version] = InvalidInput(
-                f"Cannot parse: {lineno}:{column}: {faulty_line}"
+                f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
            )

        except TokenError as te:
            # In edge cases these are raised; and typically don't have a "faulty_line".
            lineno, column = te.args[1]
            errors[grammar.version] = InvalidInput(
-                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
+                f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}"
            )

        else:
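With tv_str spliced in, a failed parse now names the newest requested target version via TargetVersion.pretty(). The string assembly alone, with hypothetical values:

    lineno, column, faulty_line = 3, 8, "match x:"
    tv_str = " for target version Python 3.8"  # from max_tv.pretty()
    msg = f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
    assert msg == "Cannot parse for target version Python 3.8: 3:8: match x:"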
@@ -115,7 +123,7 @@ class ASTSafetyError(Exception):


 def _parse_single_version(
-    src: str, version: Tuple[int, int], *, type_comments: bool
+    src: str, version: tuple[int, int], *, type_comments: bool
 ) -> ast.AST:
    filename = "<unknown>"
    with warnings.catch_warnings():

@@ -151,7 +159,7 @@ def parse_ast(src: str) -> ast.AST:
 def _normalize(lineend: str, value: str) -> str:
    # To normalize, we strip any leading and trailing space from
    # each line...
-    stripped: List[str] = [i.strip() for i in value.splitlines()]
+    stripped: list[str] = [i.strip() for i in value.splitlines()]
    normalized = lineend.join(stripped)
    # ...and remove any blank lines at the beginning and end of
    # the whole string

@@ -164,14 +172,14 @@ def stringify_ast(node: ast.AST) -> Iterator[str]:


 def _stringify_ast_with_new_parent(
-    node: ast.AST, parent_stack: List[ast.AST], new_parent: ast.AST
+    node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
 ) -> Iterator[str]:
    parent_stack.append(new_parent)
    yield from _stringify_ast(node, parent_stack)
    parent_stack.pop()


-def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
+def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)

@@ -205,7 +213,7 @@ def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
                and isinstance(node, ast.Delete)
                and isinstance(item, ast.Tuple)
            ):
-                for elt in item.elts:
+                for elt in _unwrap_tuples(item):
                    yield from _stringify_ast_with_new_parent(
                        elt, parent_stack, node
                    )

@@ -242,3 +250,11 @@ def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]:
        )

    yield f"{'  ' * len(parent_stack)})  # /{node.__class__.__name__}"
+
+
+def _unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]:
+    for elt in node.elts:
+        if isinstance(elt, ast.Tuple):
+            yield from _unwrap_tuples(elt)
+        else:
+            yield elt
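_unwrap_tuples lets the AST-equivalence check treat nested del targets like flat ones. A standalone demonstration against the stdlib ast module:

    import ast
    from collections.abc import Iterator

    def unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]:
        for elt in node.elts:
            if isinstance(elt, ast.Tuple):
                yield from unwrap_tuples(elt)
            else:
                yield elt

    (stmt,) = ast.parse("del (a, (b, c))").body
    assert isinstance(stmt, ast.Delete)
    (target,) = stmt.targets
    assert isinstance(target, ast.Tuple)
    names = [e.id for e in unwrap_tuples(target) if isinstance(e, ast.Name)]
    assert names == ["a", "b", "c"]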
@ -1,8 +1,9 @@
|
|||||||
"""Functions related to Black's formatting by line ranges feature."""
|
"""Functions related to Black's formatting by line ranges feature."""
|
||||||
|
|
||||||
import difflib
|
import difflib
|
||||||
|
from collections.abc import Collection, Iterator, Sequence
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Collection, Iterator, List, Sequence, Set, Tuple, Union
|
from typing import Union
|
||||||
|
|
||||||
from black.nodes import (
|
from black.nodes import (
|
||||||
LN,
|
LN,
|
||||||
@ -18,8 +19,8 @@
|
|||||||
from blib2to3.pgen2.token import ASYNC, NEWLINE
|
from blib2to3.pgen2.token import ASYNC, NEWLINE
|
||||||
|
|
||||||
|
|
||||||
def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
|
def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
|
||||||
lines: List[Tuple[int, int]] = []
|
lines: list[tuple[int, int]] = []
|
||||||
for lines_str in line_ranges:
|
for lines_str in line_ranges:
|
||||||
parts = lines_str.split("-")
|
parts = lines_str.split("-")
|
||||||
if len(parts) != 2:
|
if len(parts) != 2:
|
||||||
@ -40,14 +41,14 @@ def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]:
|
|||||||
return lines
|
return lines
|
||||||
|
|
||||||
|
|
||||||
def is_valid_line_range(lines: Tuple[int, int]) -> bool:
|
def is_valid_line_range(lines: tuple[int, int]) -> bool:
|
||||||
"""Returns whether the line range is valid."""
|
"""Returns whether the line range is valid."""
|
||||||
return not lines or lines[0] <= lines[1]
|
return not lines or lines[0] <= lines[1]
|
||||||
|
|
||||||
|
|
||||||
def sanitized_lines(
|
def sanitized_lines(
|
||||||
lines: Collection[Tuple[int, int]], src_contents: str
|
lines: Collection[tuple[int, int]], src_contents: str
|
||||||
) -> Collection[Tuple[int, int]]:
|
) -> Collection[tuple[int, int]]:
|
||||||
"""Returns the valid line ranges for the given source.
|
"""Returns the valid line ranges for the given source.
|
||||||
|
|
||||||
This removes ranges that are entirely outside the valid lines.
|
This removes ranges that are entirely outside the valid lines.
|
||||||
@ -74,10 +75,10 @@ def sanitized_lines(
|
|||||||
|
|
||||||
|
|
||||||
def adjusted_lines(
|
def adjusted_lines(
|
||||||
lines: Collection[Tuple[int, int]],
|
lines: Collection[tuple[int, int]],
|
||||||
original_source: str,
|
original_source: str,
|
||||||
modified_source: str,
|
modified_source: str,
|
||||||
) -> List[Tuple[int, int]]:
|
) -> list[tuple[int, int]]:
|
||||||
"""Returns the adjusted line ranges based on edits from the original code.
|
"""Returns the adjusted line ranges based on edits from the original code.
|
||||||
|
|
||||||
This computes the new line ranges by diffing original_source and
|
This computes the new line ranges by diffing original_source and
|
||||||
@ -153,7 +154,7 @@ def adjusted_lines(
|
|||||||
return new_lines
|
return new_lines
|
||||||
|
|
||||||
|
|
||||||
def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) -> None:
|
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
|
||||||
"""Converts unchanged lines to STANDALONE_COMMENT.
|
"""Converts unchanged lines to STANDALONE_COMMENT.
|
||||||
|
|
||||||
The idea is similar to how `# fmt: on/off` is implemented. It also converts the
|
The idea is similar to how `# fmt: on/off` is implemented. It also converts the
|
||||||
@ -177,7 +178,7 @@ def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]])
|
|||||||
more formatting to pass (1). However, it's hard to get it correct when
|
more formatting to pass (1). However, it's hard to get it correct when
|
||||||
incorrect indentations are used. So we defer this to future optimizations.
|
incorrect indentations are used. So we defer this to future optimizations.
|
||||||
"""
|
"""
|
||||||
lines_set: Set[int] = set()
|
lines_set: set[int] = set()
|
||||||
for start, end in lines:
|
for start, end in lines:
|
||||||
lines_set.update(range(start, end + 1))
|
lines_set.update(range(start, end + 1))
|
||||||
visitor = _TopLevelStatementsVisitor(lines_set)
|
visitor = _TopLevelStatementsVisitor(lines_set)
|
||||||
@ -205,7 +206,7 @@ class _TopLevelStatementsVisitor(Visitor[None]):
|
|||||||
classes/functions/statements.
|
classes/functions/statements.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, lines_set: Set[int]):
|
def __init__(self, lines_set: set[int]):
|
||||||
self._lines_set = lines_set
|
self._lines_set = lines_set
|
||||||
|
|
||||||
def visit_simple_stmt(self, node: Node) -> Iterator[None]:
|
def visit_simple_stmt(self, node: Node) -> Iterator[None]:
|
||||||
@ -249,7 +250,7 @@ def visit_suite(self, node: Node) -> Iterator[None]:
|
|||||||
_convert_node_to_standalone_comment(semantic_parent)
|
_convert_node_to_standalone_comment(semantic_parent)
|
||||||
|
|
||||||
|
|
||||||
def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
|
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
|
||||||
"""Converts unchanged to STANDALONE_COMMENT line by line."""
|
"""Converts unchanged to STANDALONE_COMMENT line by line."""
|
||||||
for leaf in node.leaves():
|
for leaf in node.leaves():
|
||||||
if leaf.type != NEWLINE:
|
if leaf.type != NEWLINE:
|
||||||
@ -261,7 +262,7 @@ def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None:
|
|||||||
# match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
|
# match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
|
||||||
# Here we need to check `subject_expr`. The `case_block+` will be
|
# Here we need to check `subject_expr`. The `case_block+` will be
|
||||||
# checked by their own NEWLINEs.
|
# checked by their own NEWLINEs.
|
||||||
nodes_to_ignore: List[LN] = []
|
nodes_to_ignore: list[LN] = []
|
||||||
prev_sibling = leaf.prev_sibling
|
prev_sibling = leaf.prev_sibling
|
||||||
while prev_sibling:
|
while prev_sibling:
|
||||||
nodes_to_ignore.insert(0, prev_sibling)
|
nodes_to_ignore.insert(0, prev_sibling)
|
||||||
@ -382,7 +383,7 @@ def _leaf_line_end(leaf: Leaf) -> int:
|
|||||||
return leaf.lineno + str(leaf).count("\n")
|
return leaf.lineno + str(leaf).count("\n")
|
||||||
|
|
||||||
|
|
||||||
def _get_line_range(node_or_nodes: Union[LN, List[LN]]) -> Set[int]:
|
def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]:
|
||||||
"""Returns the line range of this node or list of nodes."""
|
"""Returns the line range of this node or list of nodes."""
|
||||||
if isinstance(node_or_nodes, list):
|
if isinstance(node_or_nodes, list):
|
||||||
nodes = node_or_nodes
|
nodes = node_or_nodes
|
||||||
@ -463,7 +464,7 @@ def _calculate_lines_mappings(
|
|||||||
modified_source.splitlines(keepends=True),
|
modified_source.splitlines(keepends=True),
|
||||||
)
|
)
|
||||||
matching_blocks = matcher.get_matching_blocks()
|
matching_blocks = matcher.get_matching_blocks()
|
||||||
lines_mappings: List[_LinesMapping] = []
|
lines_mappings: list[_LinesMapping] = []
|
||||||
# matching_blocks is a sequence of "same block of code ranges", see
|
# matching_blocks is a sequence of "same block of code ranges", see
|
||||||
# https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
|
# https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
|
||||||
# Each block corresponds to a _LinesMapping with is_changed_block=False,
|
# Each block corresponds to a _LinesMapping with is_changed_block=False,
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||||
"$id": "https://github.com/psf/black/blob/main/black/resources/black.schema.json",
|
"$id": "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json",
|
||||||
"$comment": "tool.black table in pyproject.toml",
|
"$comment": "tool.black table in pyproject.toml",
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"additionalProperties": false,
|
"additionalProperties": false,
|
||||||
@ -79,18 +79,12 @@
|
|||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"enum": [
|
"enum": [
|
||||||
"hex_codes_in_unicode_sequences",
|
|
||||||
"string_processing",
|
"string_processing",
|
||||||
"hug_parens_with_braces_and_square_brackets",
|
"hug_parens_with_braces_and_square_brackets",
|
||||||
"unify_docstring_detection",
|
|
||||||
"no_normalize_fmt_skip_whitespace",
|
|
||||||
"wrap_long_dict_values_in_parens",
|
"wrap_long_dict_values_in_parens",
|
||||||
"multiline_string_handling",
|
"multiline_string_handling",
|
||||||
"typed_params_trailing_comma",
|
"always_one_newline_after_import",
|
||||||
"is_simple_lookup_for_doublestar_expression",
|
"fix_fmt_skip_in_one_liners"
|
||||||
"docstring_check_for_newline",
|
|
||||||
"remove_redundant_guard_parens",
|
|
||||||
"parens_for_long_if_clauses_in_case_block"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features."
|
"description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features."
|
||||||
@@ -1,6 +1,5 @@
 import importlib.resources
 import json
-import sys
 from typing import Any


@@ -11,10 +10,6 @@ def get_schema(tool_name: str = "black") -> Any:
     pkg = "black.resources"
     fname = "black.schema.json"

-    if sys.version_info < (3, 9):
-        with importlib.resources.open_text(pkg, fname, encoding="utf-8") as f:
-            return json.load(f)
-
-    schema = importlib.resources.files(pkg).joinpath(fname)  # type: ignore[unreachable]
+    schema = importlib.resources.files(pkg).joinpath(fname)
     with schema.open(encoding="utf-8") as f:
         return json.load(f)
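The hunk above can drop the `sys.version_info` branch because, with Python 3.9 as the floor, `importlib.resources.files()` is always available and the deprecated `open_text()` fallback is dead code. A self-contained sketch of the surviving code path (using `read_text()` here only for brevity):

    import importlib.resources
    import json

    # Read a resource bundled inside a package, the 3.9+ way.
    schema_text = (
        importlib.resources.files("black.resources")
        .joinpath("black.schema.json")
        .read_text(encoding="utf-8")
    )
    schema = json.loads(schema_text)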
@@ -5,7 +5,8 @@
 import re
 import sys
 from functools import lru_cache
-from typing import Final, List, Match, Pattern, Tuple
+from re import Match, Pattern
+from typing import Final

 from black._width_table import WIDTH_TABLE
 from blib2to3.pytree import Leaf
@@ -43,7 +44,7 @@ def has_triple_quotes(string: str) -> bool:
     return raw_string[:3] in {'"""', "'''"}


-def lines_with_leading_tabs_expanded(s: str) -> List[str]:
+def lines_with_leading_tabs_expanded(s: str) -> list[str]:
     """
     Splits string into lines and expands only leading tabs (following the normal
     Python rules)
@@ -62,10 +63,9 @@ def lines_with_leading_tabs_expanded(s: str) -> List[str]:
     return lines


-def fix_docstring(docstring: str, prefix: str) -> str:
+def fix_multiline_docstring(docstring: str, prefix: str) -> str:
     # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
-    if not docstring:
-        return ""
+    assert docstring, "INTERNAL ERROR: Multiline docstrings cannot be empty"
     lines = lines_with_leading_tabs_expanded(docstring)
     # Determine minimum indentation (first line doesn't count):
     indent = sys.maxsize
@@ -185,8 +185,7 @@ def normalize_string_quotes(s: str) -> str:
         orig_quote = "'"
         new_quote = '"'
     first_quote_pos = s.find(orig_quote)
-    if first_quote_pos == -1:
-        return s  # There's an internal error
+    assert first_quote_pos != -1, f"INTERNAL ERROR: Malformed string {s!r}"

     prefix = s[:first_quote_pos]
     unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
@@ -242,9 +241,9 @@ def normalize_string_quotes(s: str) -> str:

 def normalize_fstring_quotes(
     quote: str,
-    middles: List[Leaf],
+    middles: list[Leaf],
     is_raw_fstring: bool,
-) -> Tuple[List[Leaf], str]:
+) -> tuple[list[Leaf], str]:
     """Prefer double quotes but only if it doesn't cause more escaping.

     Adds or removes backslashes as appropriate.
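For context on the two defensive checks that became asserts: `fix_multiline_docstring` and `normalize_string_quotes` are now only called with inputs their callers already guarantee, so an empty docstring or a quote-less string is an internal error rather than a case to handle. The quote-normalization contract itself is unchanged; roughly (illustrative inputs and expected results):

    from black.strings import normalize_string_quotes

    normalize_string_quotes("'hello'")  # -> '"hello"'  (no new escapes needed)
    s = "'She said \"hi\"'"  # single-quoted source text containing double quotes
    normalize_string_quotes(s)  # returned unchanged: switching would add escapes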
@@ -5,31 +5,15 @@
 import re
 from abc import ABC, abstractmethod
 from collections import defaultdict
+from collections.abc import Callable, Collection, Iterable, Iterator, Sequence
 from dataclasses import dataclass
-from typing import (
-    Any,
-    Callable,
-    ClassVar,
-    Collection,
-    Dict,
-    Final,
-    Iterable,
-    Iterator,
-    List,
-    Literal,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import Any, ClassVar, Final, Literal, Optional, TypeVar, Union

 from mypy_extensions import trait

 from black.comments import contains_pragma_comment
 from black.lines import Line, append_leaves
-from black.mode import Feature, Mode, Preview
+from black.mode import Feature, Mode
 from black.nodes import (
     CLOSING_BRACKETS,
     OPENING_BRACKETS,
@@ -68,7 +52,7 @@ class CannotTransform(Exception):
 ParserState = int
 StringID = int
 TResult = Result[T, CannotTransform]  # (T)ransform Result
-TMatchResult = TResult[List[Index]]
+TMatchResult = TResult[list[Index]]

 SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"])  # East Asian stops

@@ -98,14 +82,8 @@ def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:
         # Brackets and parentheses indicate calls, subscripts, etc. ...
         # basically stuff that doesn't count as "simple". Only a NAME lookup
         # or dotted lookup (eg. NAME.NAME) is OK.
-        if Preview.is_simple_lookup_for_doublestar_expression not in mode:
-            return original_is_simple_lookup_func(line, index, kind)
-
-        else:
         if kind == -1:
-            return handle_is_simple_look_up_prev(
-                line, index, {token.RPAR, token.RSQB}
-            )
+            return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
         else:
             return handle_is_simple_lookup_forward(
                 line, index, {token.LPAR, token.LSQB}
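The hunk above retires the `Preview.is_simple_lookup_for_doublestar_expression` gate and makes the refined lookup the only code path. The rule it implements decides when Black hugs the power operator: `**` loses its surrounding spaces only when both operands are simple (names, dotted lookups, numeric literals). Roughly:

    i = i**2          # simple operands: hugged
    j = a.b**2        # dotted lookup still counts as simple
    k = f(x) ** 2     # a call is not simple: spaces kept
    m = (x + 1) ** 2  # parenthesized expression: spaces kept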
@@ -155,31 +133,7 @@ def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
         yield new_line


-def original_is_simple_lookup_func(
-    line: Line, index: int, step: Literal[1, -1]
-) -> bool:
-    if step == -1:
-        disallowed = {token.RPAR, token.RSQB}
-    else:
-        disallowed = {token.LPAR, token.LSQB}
-
-    while 0 <= index < len(line.leaves):
-        current = line.leaves[index]
-        if current.type in disallowed:
-            return False
-        if current.type not in {token.NAME, token.DOT} or current.value == "for":
-            # If the current token isn't disallowed, we'll assume this is
-            # simple as only the disallowed tokens are semantically
-            # attached to this lookup expression we're checking. Also,
-            # stop early if we hit the 'for' bit of a comprehension.
-            return True
-
-        index += step
-
-    return True
-
-
-def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) -> bool:
+def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
     """
     Handling the determination of is_simple_lookup for the lines prior to the doublestar
     token. This is required because of the need to isolate the chained expression
@@ -202,7 +156,7 @@ def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int])


 def handle_is_simple_lookup_forward(
-    line: Line, index: int, disallowed: Set[int]
+    line: Line, index: int, disallowed: set[int]
 ) -> bool:
     """
     Handling decision is_simple_lookup for the lines behind the doublestar token.
@@ -227,7 +181,7 @@ def handle_is_simple_lookup_forward(
     return True


-def is_expression_chained(chained_leaves: List[Leaf]) -> bool:
+def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
     """
     Function to determine if the variable is a chained call.
     (e.g., foo.lookup, foo().lookup, (foo.lookup())) will be recognized as chained call)
@@ -298,7 +252,7 @@ def do_match(self, line: Line) -> TMatchResult:

     @abstractmethod
     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         """
         Yields:
@@ -388,8 +342,8 @@ class CustomSplitMapMixin:
     the resultant substrings go over the configured max line length.
     """

-    _Key: ClassVar = Tuple[StringID, str]
-    _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
+    _Key: ClassVar = tuple[StringID, str]
+    _CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict(
         tuple
     )

@@ -413,7 +367,7 @@ def add_custom_splits(
         key = self._get_key(string)
         self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

-    def pop_custom_splits(self, string: str) -> List[CustomSplit]:
+    def pop_custom_splits(self, string: str) -> list[CustomSplit]:
         """Custom Split Map Getter Method

         Returns:
@@ -488,7 +442,7 @@ def do_match(self, line: Line) -> TMatchResult:
                        break
                    i += 1

                if not is_part_of_annotation(leaf) and not contains_comment:
-                if not is_part_of_annotation(leaf) and not contains_comment:
+                if not contains_comment and not is_part_of_annotation(leaf):
                    string_indices.append(idx)

                # Advance to the next non-STRING leaf.
@@ -512,7 +466,7 @@ def do_match(self, line: Line) -> TMatchResult:
         return TErr("This line has no strings that need merging.")

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         new_line = line

@@ -543,7 +497,7 @@ def do_transform(

     @staticmethod
     def _remove_backslash_line_continuation_chars(
-        line: Line, string_indices: List[int]
+        line: Line, string_indices: list[int]
     ) -> TResult[Line]:
         """
         Merge strings that were split across multiple lines using
@@ -584,7 +538,7 @@ def _remove_backslash_line_continuation_chars(
         return Ok(new_line)

     def _merge_string_group(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> TResult[Line]:
         """
         Merges string groups (i.e. set of adjacent strings).
@@ -603,7 +557,7 @@ def _merge_string_group(
         is_valid_index = is_valid_index_factory(LL)

         # A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
-        merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {}
+        merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
         for string_idx in string_indices:
             vresult = self._validate_msg(line, string_idx)
             if isinstance(vresult, Err):
@@ -639,8 +593,8 @@ def _merge_string_group(
         return Ok(new_line)

     def _merge_one_string_group(
-        self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
-    ) -> Tuple[int, Leaf]:
+        self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
+    ) -> tuple[int, Leaf]:
         """
         Merges one string group where the first string in the group is
         `LL[string_idx]`.
@@ -676,10 +630,10 @@ def make_naked(string: str, string_prefix: str) -> str:
             """
             assert_is_leaf_string(string)
             if "f" in string_prefix:
-                f_expressions = (
+                f_expressions = [
                     string[span[0] + 1 : span[1] - 1]  # +-1 to get rid of curly braces
                     for span in iter_fexpr_spans(string)
-                )
+                ]
                 debug_expressions_contain_visible_quotes = any(
                     re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
                     for expression in f_expressions
@@ -810,6 +764,8 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
            - The set of all string prefixes in the string group is of
              length greater than one and is not equal to {"", "f"}.
            - The string group consists of raw strings.
+           - The string group would merge f-strings with different quote types
+             and internal quotes.
            - The string group is stringified type annotations. We don't want to
              process stringified type annotations since pyright doesn't support
              them spanning multiple string values. (NOTE: mypy, pytype, pyre do
@@ -836,6 +792,8 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:

            i += inc

+        QUOTE = line.leaves[string_idx].value[-1]
+
        num_of_inline_string_comments = 0
        set_of_prefixes = set()
        num_of_strings = 0
@@ -858,6 +816,19 @@ def _validate_msg(line: Line, string_idx: int) -> TResult[None]:

            set_of_prefixes.add(prefix)

+            if (
+                "f" in prefix
+                and leaf.value[-1] != QUOTE
+                and (
+                    "'" in leaf.value[len(prefix) + 1 : -1]
+                    or '"' in leaf.value[len(prefix) + 1 : -1]
+                )
+            ):
+                return TErr(
+                    "StringMerger does NOT merge f-strings with different quote types"
+                    " and internal quotes."
+                )
+
            if id(leaf) in line.comments:
                num_of_inline_string_comments += 1
                if contains_pragma_comment(line.comments[id(leaf)]):
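The new `QUOTE` comparison is easiest to see on a concrete input. Merging the two f-strings below would pull a single-quoted body into a double-quoted result while the expression parts contain the other quote character, which cannot be done safely, so `_validate_msg` now rejects the group with a `TErr`. An assumed illustration of the guarded case:

    # Adjacent f-strings with different quote types and quotes inside them:
    x = f"a {'b'} c" f'd {"e"} f'
    # StringMerger now refuses to merge these instead of risking broken nesting.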
@@ -886,6 +857,7 @@ class StringParenStripper(StringTransformer):
        The line contains a string which is surrounded by parentheses and:
        - The target string is NOT the only argument to a function call.
        - The target string is NOT a "pointless" string.
+       - The target string is NOT a dictionary value.
        - If the target string contains a PERCENT, the brackets are not
          preceded or followed by an operator with higher precedence than
          PERCENT.
@@ -933,11 +905,14 @@ def do_match(self, line: Line) -> TMatchResult:
             ):
                 continue

-            # That LPAR should NOT be preceded by a function name or a closing
-            # bracket (which could be a function which returns a function or a
-            # list/dictionary that contains a function)...
+            # That LPAR should NOT be preceded by a colon (which could be a
+            # dictionary value), function name, or a closing bracket (which
+            # could be a function returning a function or a list/dictionary
+            # containing a function)...
             if is_valid_index(idx - 2) and (
-                LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
+                LL[idx - 2].type == token.COLON
+                or LL[idx - 2].type == token.NAME
+                or LL[idx - 2].type in CLOSING_BRACKETS
             ):
                 continue

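With the added `token.COLON` check, a parenthesized string sitting in dictionary-value position no longer matches `StringParenStripper`. Sketch of the distinction (illustrative):

    my_dict = {
        "key": ("some value"),  # LPAR preceded by ':': left alone now
    }
    x = ("some value")  # ordinary assignment: still a candidate for stripping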
@@ -1004,11 +979,11 @@ def do_match(self, line: Line) -> TMatchResult:
         return TErr("This line has no strings wrapped in parens.")

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves

-        string_and_rpar_indices: List[int] = []
+        string_and_rpar_indices: list[int] = []
         for string_idx in string_indices:
             string_parser = StringParser()
             rpar_idx = string_parser.parse(LL, string_idx)
@@ -1031,7 +1006,7 @@ def do_transform(
             )

     def _transform_to_new_line(
-        self, line: Line, string_and_rpar_indices: List[int]
+        self, line: Line, string_and_rpar_indices: list[int]
     ) -> Line:
         LL = line.leaves

@@ -1284,7 +1259,7 @@ def _get_max_string_length(self, line: Line, string_idx: int) -> int:
         return max_string_length

     @staticmethod
-    def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
+    def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -1329,14 +1304,14 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]:
         return None


-def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
+def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
     """
     Yields spans corresponding to expressions in a given f-string.
     Spans are half-open ranges (left inclusive, right exclusive).
    Assumes the input string is a valid f-string, but will not crash if the input
    string is invalid.
    """
-    stack: List[int] = []  # our curly paren stack
+    stack: list[int] = []  # our curly paren stack
     i = 0
     while i < len(s):
         if s[i] == "{":
@@ -1499,7 +1474,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
         return Ok([string_idx])

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves
         assert len(string_indices) == 1, (
@@ -1601,7 +1576,7 @@ def more_splits_should_be_made() -> bool:
             else:
                 return str_width(rest_value) > max_last_string_column()

-        string_line_results: List[Ok[Line]] = []
+        string_line_results: list[Ok[Line]] = []
         while more_splits_should_be_made():
             if use_custom_breakpoints:
                 # Custom User Split (manual)
@@ -1730,7 +1705,7 @@ def more_splits_should_be_made() -> bool:
         last_line.comments = line.comments.copy()
         yield Ok(last_line)

-    def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+    def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
         """
         Yields:
             All ranges of @string which, if @string were to be split there,
@@ -1761,7 +1736,7 @@ def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
                 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
             yield begin, end

-    def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+    def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
         """
         Yields:
             All ranges of @string which, if @string were to be split there,
@@ -1772,8 +1747,8 @@ def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
             return
         yield from iter_fexpr_spans(string)

-    def _get_illegal_split_indices(self, string: str) -> Set[Index]:
-        illegal_indices: Set[Index] = set()
+    def _get_illegal_split_indices(self, string: str) -> set[Index]:
+        illegal_indices: set[Index] = set()
         iterators = [
             self._iter_fexpr_slices(string),
             self._iter_nameescape_slices(string),
@@ -1899,7 +1874,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str:
         else:
             return string

-    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
+    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
         LL = list(leaves)

         string_op_leaves = []
@@ -2008,7 +1983,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
         return TErr("This line does not contain any non-atomic strings.")

     @staticmethod
-    def _return_match(LL: List[Leaf]) -> Optional[int]:
+    def _return_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2033,7 +2008,7 @@ def _return_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _else_match(LL: List[Leaf]) -> Optional[int]:
+    def _else_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2060,7 +2035,7 @@ def _else_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _assert_match(LL: List[Leaf]) -> Optional[int]:
+    def _assert_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2095,7 +2070,7 @@ def _assert_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _assign_match(LL: List[Leaf]) -> Optional[int]:
+    def _assign_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2142,7 +2117,7 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]:
         return None

     @staticmethod
-    def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
+    def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]:
         """
         Returns:
             string_idx such that @LL[string_idx] is equal to our target (i.e.
@@ -2181,7 +2156,7 @@ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
         return None

     def do_transform(
-        self, line: Line, string_indices: List[int]
+        self, line: Line, string_indices: list[int]
     ) -> Iterator[TResult[Line]]:
         LL = line.leaves
         assert len(string_indices) == 1, (
@@ -2263,12 +2238,12 @@ def do_transform(
         elif right_leaves and right_leaves[-1].type == token.RPAR:
             # Special case for lambda expressions as dict's value, e.g.:
             #     my_dict = {
-            #        "key": lambda x: f"formatted: {x},
+            #        "key": lambda x: f"formatted: {x}",
             #     }
             # After wrapping the dict's value with parentheses, the string is
             # followed by a RPAR but its opening bracket is lambda's, not
             # the string's:
-            #        "key": (lambda x: f"formatted: {x}),
+            #        "key": (lambda x: f"formatted: {x}"),
             opening_bracket = right_leaves[-1].opening_bracket
             if opening_bracket is not None and opening_bracket in left_leaves:
                 index = left_leaves.index(opening_bracket)
@@ -2347,7 +2322,7 @@ class StringParser:
     DONE: Final = 8

     # Lookup Table for Next State
-    _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
+    _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = {
         # A string trailer may start with '.' OR '%'.
         (START, token.DOT): DOT,
         (START, token.PERCENT): PERCENT,
@@ -2376,7 +2351,7 @@ def __init__(self) -> None:
         self._state = self.START
         self._unmatched_lpars = 0

-    def parse(self, leaves: List[Leaf], string_idx: int) -> int:
+    def parse(self, leaves: list[Leaf], string_idx: int) -> int:
         """
         Pre-conditions:
             * @leaves[@string_idx].type == token.STRING
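A helper worth calling out from this file: `iter_fexpr_spans` yields half-open `(start, end)` spans covering each `{...}` expression, braces included, which is why `make_naked` above slices with `span[0] + 1 : span[1] - 1`. Expected behavior, with indices worked out by hand (a sketch, not a captured run):

    s = 'f"hello {name} {age}"'
    list(iter_fexpr_spans(s))  # -> [(8, 14), (15, 20)]
    s[8 + 1 : 14 - 1]          # -> 'name'
    s[15 + 1 : 20 - 1]         # -> 'age'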
@@ -2,9 +2,8 @@
 import logging
 from concurrent.futures import Executor, ProcessPoolExecutor
 from datetime import datetime, timezone
-from functools import partial
+from functools import cache, partial
 from multiprocessing import freeze_support
-from typing import Set, Tuple

 try:
     from aiohttp import web
@@ -86,12 +85,16 @@ def main(bind_host: str, bind_port: int) -> None:
     web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)


+@cache
+def executor() -> Executor:
+    return ProcessPoolExecutor()
+
+
 def make_app() -> web.Application:
     app = web.Application(
         middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))]
     )
-    executor = ProcessPoolExecutor()
-    app.add_routes([web.post("/", partial(handle, executor=executor))])
+    app.add_routes([web.post("/", partial(handle, executor=executor()))])
     return app


@@ -191,7 +194,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:

     preview = bool(headers.get(PREVIEW, False))
     unstable = bool(headers.get(UNSTABLE, False))
-    enable_features: Set[black.Preview] = set()
+    enable_features: set[black.Preview] = set()
     enable_unstable_features = headers.get(ENABLE_UNSTABLE_FEATURE, "").split(",")
     for piece in enable_unstable_features:
         piece = piece.strip()
@@ -216,7 +219,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode:
     )


-def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
+def parse_python_variant_header(value: str) -> tuple[bool, set[black.TargetVersion]]:
     if value == "pyi":
         return True, set()
     else:
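The `@cache` decorator turns `executor()` into a lazily created process-pool singleton: the pool is built on the first call that needs it rather than at application startup, and every later call returns the same instance. The idiom in isolation:

    from concurrent.futures import Executor, ProcessPoolExecutor
    from functools import cache

    @cache
    def executor() -> Executor:
        # Built on first call, then shared by every subsequent caller.
        return ProcessPoolExecutor()

    assert executor() is executor()  # cached: one pool per process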
@@ -1,21 +1,11 @@
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar
+from collections.abc import Awaitable, Callable, Iterable

+from aiohttp.typedefs import Middleware
+from aiohttp.web_middlewares import middleware
 from aiohttp.web_request import Request
 from aiohttp.web_response import StreamResponse

-if TYPE_CHECKING:
-    F = TypeVar("F", bound=Callable[..., Any])
-    middleware: Callable[[F], F]
-else:
-    try:
-        from aiohttp.web_middlewares import middleware
-    except ImportError:
-        # @middleware is deprecated and its behaviour is the default since aiohttp 4.0
-        # so if it doesn't exist anymore, define a no-op for forward compatibility.
-        middleware = lambda x: x  # noqa: E731

 Handler = Callable[[Request], Awaitable[StreamResponse]]
-Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]


 def cors(allow_headers: Iterable[str]) -> Middleware:
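The rewrite relies on modern aiohttp shipping both pieces directly: the `Middleware` alias in `aiohttp.typedefs` and the `@middleware` decorator in `aiohttp.web_middlewares`, which makes the old `TYPE_CHECKING`/`ImportError` shim unnecessary. A minimal middleware using the same imports (a sketch, not blackd's actual CORS handler):

    from aiohttp import web
    from aiohttp.web_middlewares import middleware

    @middleware
    async def add_header(request, handler):
        response = await handler(request)
        response.headers["X-Example"] = "1"
        return response

    app = web.Application(middlewares=[add_header])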
@@ -12,9 +12,9 @@ file_input: (NEWLINE | stmt)* ENDMARKER
 single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
 eval_input: testlist NEWLINE* ENDMARKER

-typevar: NAME [':' expr] ['=' expr]
-paramspec: '**' NAME ['=' expr]
-typevartuple: '*' NAME ['=' (expr|star_expr)]
+typevar: NAME [':' test] ['=' test]
+paramspec: '**' NAME ['=' test]
+typevartuple: '*' NAME ['=' (test|star_expr)]
 typeparam: typevar | paramspec | typevartuple
 typeparams: '[' typeparam (',' typeparam)* [','] ']'

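In this grammar, `expr` covers only operator expressions, while `test` additionally admits conditional (ternary) expressions and lambdas. Switching the PEP 695/696 type-parameter rules from `expr` to `test` therefore lets bounds and defaults be full expressions. An illustrative case the new rules accept (`FLAG` is a made-up name):

    def first[T = int if FLAG else object](xs: list[T]) -> T:
        return xs[0]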
@@ -21,13 +21,14 @@
 import os
 import pkgutil
 import sys
+from collections.abc import Iterable, Iterator
 from contextlib import contextmanager
 from dataclasses import dataclass, field
 from logging import Logger
-from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
+from typing import IO, Any, Optional, Union, cast

 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pgen2.tokenize import GoodTokenInfo
+from blib2to3.pgen2.tokenize import TokenInfo
 from blib2to3.pytree import NL

 # Pgen imports
@@ -40,7 +41,7 @@
 class ReleaseRange:
     start: int
     end: Optional[int] = None
-    tokens: List[Any] = field(default_factory=list)
+    tokens: list[Any] = field(default_factory=list)

     def lock(self) -> None:
         total_eaten = len(self.tokens)
@@ -51,7 +52,7 @@ class TokenProxy:
     def __init__(self, generator: Any) -> None:
         self._tokens = generator
         self._counter = 0
-        self._release_ranges: List[ReleaseRange] = []
+        self._release_ranges: list[ReleaseRange] = []

     @contextmanager
     def release(self) -> Iterator["TokenProxy"]:
@@ -111,7 +112,7 @@ def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
             logger = logging.getLogger(__name__)
         self.logger = logger

-    def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
+    def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
         """Parse a series of tokens and return the syntax tree."""
         # XXX Move the prefix computation into a wrapper around tokenize.
         proxy = TokenProxy(tokens)
@@ -121,7 +122,7 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->

         lineno = 1
         column = 0
-        indent_columns: List[int] = []
+        indent_columns: list[int] = []
         type = value = start = end = line_text = None
         prefix = ""

@@ -179,31 +180,21 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) ->
         assert p.rootnode is not None
         return p.rootnode

-    def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
-        """Parse a stream and return the syntax tree."""
-        tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
-        return self.parse_tokens(tokens, debug)
-
-    def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
-        """Parse a stream and return the syntax tree."""
-        return self.parse_stream_raw(stream, debug)
-
     def parse_file(
         self, filename: Path, encoding: Optional[str] = None, debug: bool = False
     ) -> NL:
         """Parse a file and return the syntax tree."""
         with open(filename, encoding=encoding) as stream:
-            return self.parse_stream(stream, debug)
+            text = stream.read()
+        return self.parse_string(text, debug)

     def parse_string(self, text: str, debug: bool = False) -> NL:
         """Parse a string and return the syntax tree."""
-        tokens = tokenize.generate_tokens(
-            io.StringIO(text).readline, grammar=self.grammar
-        )
+        tokens = tokenize.tokenize(text, grammar=self.grammar)
         return self.parse_tokens(tokens, debug)

-    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
-        lines: List[str] = []
+    def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
+        lines: list[str] = []
         current_line = ""
         current_column = 0
         wait_for_nl = False
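After this hunk the driver tokenizes whole strings in one shot (`tokenize.tokenize(text, ...)`) instead of pulling lines through `readline`, and `parse_file` becomes `read()` followed by `parse_string`; the now-unused stream entry points are deleted. Typical usage, sketched (the grammar attribute name is an assumption about blib2to3's `pygram` module):

    from blib2to3 import pygram
    from blib2to3.pgen2.driver import Driver

    driver = Driver(pygram.python_grammar_soft_keywords)
    tree = driver.parse_string("x = 1\n")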
@@ -16,15 +16,15 @@
 import os
 import pickle
 import tempfile
-from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
+from typing import Any, Optional, TypeVar, Union

 # Local imports
 from . import token

 _P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[str]]
-DFA = List[List[Tuple[int, int]]]
-DFAS = Tuple[DFA, Dict[int, int]]
+Label = tuple[int, Optional[str]]
+DFA = list[list[tuple[int, int]]]
+DFAS = tuple[DFA, dict[int, int]]
 Path = Union[str, "os.PathLike[str]"]


@@ -83,16 +83,16 @@ class Grammar:
     """

     def __init__(self) -> None:
-        self.symbol2number: Dict[str, int] = {}
-        self.number2symbol: Dict[int, str] = {}
-        self.states: List[DFA] = []
-        self.dfas: Dict[int, DFAS] = {}
-        self.labels: List[Label] = [(0, "EMPTY")]
-        self.keywords: Dict[str, int] = {}
-        self.soft_keywords: Dict[str, int] = {}
-        self.tokens: Dict[int, int] = {}
-        self.symbol2label: Dict[str, int] = {}
-        self.version: Tuple[int, int] = (0, 0)
+        self.symbol2number: dict[str, int] = {}
+        self.number2symbol: dict[int, str] = {}
+        self.states: list[DFA] = []
+        self.dfas: dict[int, DFAS] = {}
+        self.labels: list[Label] = [(0, "EMPTY")]
+        self.keywords: dict[str, int] = {}
+        self.soft_keywords: dict[str, int] = {}
+        self.tokens: dict[int, int] = {}
+        self.symbol2label: dict[str, int] = {}
+        self.version: tuple[int, int] = (0, 0)
         self.start = 256
         # Python 3.7+ parses async as a keyword, not an identifier
         self.async_keywords = False
@@ -114,7 +114,7 @@ def dump(self, filename: Path) -> None:
             pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
         os.replace(f.name, filename)

-    def _update(self, attrs: Dict[str, Any]) -> None:
+    def _update(self, attrs: dict[str, Any]) -> None:
         for k, v in attrs.items():
             setattr(self, k, v)

@@ -4,9 +4,8 @@
 """Safely evaluate Python string literals without using eval()."""

 import re
-from typing import Dict, Match

-simple_escapes: Dict[str, str] = {
+simple_escapes: dict[str, str] = {
     "a": "\a",
     "b": "\b",
     "f": "\f",
@@ -20,7 +19,7 @@
 }


-def escape(m: Match[str]) -> str:
+def escape(m: re.Match[str]) -> str:
     all, tail = m.group(0, 1)
     assert all.startswith("\\")
     esc = simple_escapes.get(tail)
@@ -29,16 +28,16 @@ def escape(m: Match[str]) -> str:
     if tail.startswith("x"):
         hexes = tail[1:]
         if len(hexes) < 2:
-            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+            raise ValueError(f"invalid hex string escape ('\\{tail}')")
         try:
             i = int(hexes, 16)
         except ValueError:
-            raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
+            raise ValueError(f"invalid hex string escape ('\\{tail}')") from None
     else:
         try:
             i = int(tail, 8)
         except ValueError:
-            raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
+            raise ValueError(f"invalid octal string escape ('\\{tail}')") from None
     return chr(i)

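`escape` above is a `re.sub`-style callback: it receives a match whose group 1 is the escape body and returns the decoded character. A worked example of the hex branch (the pattern here is simplified for illustration):

    import re

    m = re.match(r"\\(x41)", r"\x41")
    assert m is not None
    escape(m)  # -> "A": group(1) is "x41", and int("41", 16) == 0x41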
@@ -9,20 +9,9 @@
 how this parsing engine works.

 """
+from collections.abc import Callable, Iterator
 from contextlib import contextmanager
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Optional, Union, cast

 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
@@ -34,10 +23,10 @@
     from blib2to3.pgen2.driver import TokenProxy


-Results = Dict[str, NL]
+Results = dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
-DFA = List[List[Tuple[int, int]]]
-DFAS = Tuple[DFA, Dict[int, int]]
+DFA = list[list[tuple[int, int]]]
+DFAS = tuple[DFA, dict[int, int]]


 def lam_sub(grammar: Grammar, node: RawNode) -> NL:
@@ -50,24 +39,24 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:


 def stack_copy(
-    stack: List[Tuple[DFAS, int, RawNode]],
-) -> List[Tuple[DFAS, int, RawNode]]:
+    stack: list[tuple[DFAS, int, RawNode]],
+) -> list[tuple[DFAS, int, RawNode]]:
     """Nodeless stack copy."""
     return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]


 class Recorder:
-    def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
+    def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
         self.parser = parser
         self._ilabels = ilabels
         self.context = context  # not really matter

-        self._dead_ilabels: Set[int] = set()
+        self._dead_ilabels: set[int] = set()
         self._start_point = self.parser.stack
         self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}

     @property
-    def ilabels(self) -> Set[int]:
+    def ilabels(self) -> set[int]:
         return self._dead_ilabels.symmetric_difference(self._ilabels)

     @contextmanager
@@ -100,18 +89,12 @@ def backtrack(self) -> Iterator[None]:
            self.parser.is_backtracking = is_backtracking

     def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
-        func: Callable[..., Any]
-        if raw:
-            func = self.parser._addtoken
-        else:
-            func = self.parser.addtoken
-
         for ilabel in self.ilabels:
             with self.switch_to(ilabel):
-                args = [tok_type, tok_val, self.context]
                 if raw:
-                    args.insert(0, ilabel)
-                func(*args)
+                    self.parser._addtoken(ilabel, tok_type, tok_val, self.context)
+                else:
+                    self.parser.addtoken(tok_type, tok_val, self.context)

     def determine_route(
         self, value: Optional[str] = None, force: bool = False
@@ -233,9 +216,9 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         # where children is a list of nodes or None, and context may be None.
         newnode: RawNode = (start, None, None, [])
         stackentry = (self.grammar.dfas[start], 0, newnode)
-        self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry]
+        self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
         self.rootnode: Optional[NL] = None
-        self.used_names: Set[str] = set()
+        self.used_names: set[str] = set()
         self.proxy = proxy
         self.last_token = None

@@ -333,7 +316,7 @@ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
            # No success finding a transition
            raise ParseError("bad input", type, value, context)

-    def classify(self, type: int, value: str, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> list[int]:
         """Turn a token into a label.  (Internal)

         Depending on whether the value is a soft-keyword or not,
@@ -2,21 +2,11 @@
 # Licensed to PSF under a Contributor Agreement.

 import os
-from typing import (
-    IO,
-    Any,
-    Dict,
-    Iterator,
-    List,
-    NoReturn,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-)
+from collections.abc import Iterator, Sequence
+from typing import IO, Any, NoReturn, Optional, Union

 from blib2to3.pgen2 import grammar, token, tokenize
-from blib2to3.pgen2.tokenize import GoodTokenInfo
+from blib2to3.pgen2.tokenize import TokenInfo

 Path = Union[str, "os.PathLike[str]"]

@@ -28,8 +18,8 @@ class PgenGrammar(grammar.Grammar):
 class ParserGenerator:
     filename: Path
     stream: IO[str]
-    generator: Iterator[GoodTokenInfo]
-    first: Dict[str, Optional[Dict[str, int]]]
+    generator: Iterator[TokenInfo]
+    first: dict[str, Optional[dict[str, int]]]

     def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
         close_stream = None
@@ -37,8 +27,7 @@ def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
             stream = open(filename, encoding="utf-8")
             close_stream = stream.close
         self.filename = filename
-        self.stream = stream
-        self.generator = tokenize.generate_tokens(stream.readline)
+        self.generator = tokenize.tokenize(stream.read())
         self.gettoken()  # Initialize lookahead
         self.dfas, self.startsymbol = self.parse()
         if close_stream is not None:
@@ -71,7 +60,7 @@ def make_grammar(self) -> PgenGrammar:
         c.start = c.symbol2number[self.startsymbol]
         return c

-    def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
+    def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]:
         rawfirst = self.first[name]
         assert rawfirst is not None
         first = {}
@@ -144,14 +133,14 @@ def calcfirst(self, name: str) -> None:
         dfa = self.dfas[name]
         self.first[name] = None  # dummy to detect left recursion
         state = dfa[0]
-        totalset: Dict[str, int] = {}
+        totalset: dict[str, int] = {}
         overlapcheck = {}
         for label in state.arcs:
             if label in self.dfas:
                 if label in self.first:
                     fset = self.first[label]
                     if fset is None:
-                        raise ValueError("recursion for rule %r" % name)
+                        raise ValueError(f"recursion for rule {name!r}")
                 else:
                     self.calcfirst(label)
                     fset = self.first[label]
@@ -161,18 +150,18 @@ def calcfirst(self, name: str) -> None:
             else:
                 totalset[label] = 1
                 overlapcheck[label] = {label: 1}
-        inverse: Dict[str, str] = {}
+        inverse: dict[str, str] = {}
         for label, itsfirst in overlapcheck.items():
             for symbol in itsfirst:
                 if symbol in inverse:
                     raise ValueError(
-                        "rule %s is ambiguous; %s is in the first sets of %s as well"
-                        " as %s" % (name, symbol, label, inverse[symbol])
+                        f"rule {name} is ambiguous; {symbol} is in the first sets of"
+                        f" {label} as well as {inverse[symbol]}"
                     )
                 inverse[symbol] = label
         self.first[name] = totalset

-    def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
+    def parse(self) -> tuple[dict[str, list["DFAState"]], str]:
         dfas = {}
         startsymbol: Optional[str] = None
         # MSTART: (NEWLINE | RULE)* ENDMARKER
@@ -197,7 +186,7 @@ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
         assert startsymbol is not None
         return dfas, startsymbol

-    def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
+    def make_dfa(self, start: "NFAState", finish: "NFAState") -> list["DFAState"]:
         # To turn an NFA into a DFA, we define the states of the DFA
         # to correspond to *sets* of states of the NFA.  Then do some
         # state reduction.  Let's represent sets as dicts with 1 for
@@ -205,12 +194,12 @@ def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
         assert isinstance(start, NFAState)
         assert isinstance(finish, NFAState)

-        def closure(state: NFAState) -> Dict[NFAState, int]:
-            base: Dict[NFAState, int] = {}
+        def closure(state: NFAState) -> dict[NFAState, int]:
+            base: dict[NFAState, int] = {}
             addclosure(state, base)
             return base

-        def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
+        def addclosure(state: NFAState, base: dict[NFAState, int]) -> None:
             assert isinstance(state, NFAState)
             if state in base:
                 return
@@ -221,7 +210,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:

         states = [DFAState(closure(start), finish)]
         for state in states:  # NB states grows while we're iterating
-            arcs: Dict[str, Dict[NFAState, int]] = {}
+            arcs: dict[str, dict[NFAState, int]] = {}
             for nfastate in state.nfaset:
                 for label, next in nfastate.arcs:
                     if label is not None:
@@ -248,18 +237,18 @@ def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
                    j = len(todo)
                    todo.append(next)
                if label is None:
-                    print("    -> %d" % j)
+                    print(f"    -> {j}")
                else:
-                    print("    %s -> %d" % (label, j))
+                    print(f"    {label} -> {j}")

     def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
         print("Dump of DFA for", name)
         for i, state in enumerate(dfa):
             print("  State", i, state.isfinal and "(final)" or "")
             for label, next in sorted(state.arcs.items()):
-                print("    %s -> %d" % (label, dfa.index(next)))
+                print(f"    {label} -> {dfa.index(next)}")

-    def simplify_dfa(self, dfa: List["DFAState"]) -> None:
+    def simplify_dfa(self, dfa: list["DFAState"]) -> None:
         # This is not theoretically optimal, but works well enough.
         # Algorithm: repeatedly look for two states that have the same
         # set of arcs (same labels pointing to the same nodes) and
@@ -280,7 +269,7 @@ def simplify_dfa(self, dfa: List["DFAState"]) -> None:
                        changes = True
                        break

-    def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_rhs(self) -> tuple["NFAState", "NFAState"]:
         # RHS: ALT ('|' ALT)*
         a, z = self.parse_alt()
         if self.value != "|":
@@ -297,7 +286,7 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
                z.addarc(zz)
            return aa, zz

-    def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_alt(self) -> tuple["NFAState", "NFAState"]:
         # ALT: ITEM+
         a, b = self.parse_item()
         while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
@@ -306,7 +295,7 @@ def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
            b = d
        return a, b

-    def parse_item(self) -> Tuple["NFAState", "NFAState"]:
+    def parse_item(self) -> tuple["NFAState", "NFAState"]:
         # ITEM: '[' RHS ']' | ATOM ['+' | '*']
         if self.value == "[":
             self.gettoken()
@@ -326,7 +315,7 @@ def parse_item(self) -> Tuple["NFAState", "NFAState"]:
         else:
             return a, a

-    def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
|
def parse_atom(self) -> tuple["NFAState", "NFAState"]:
|
||||||
# ATOM: '(' RHS ')' | NAME | STRING
|
# ATOM: '(' RHS ')' | NAME | STRING
|
||||||
if self.value == "(":
|
if self.value == "(":
|
||||||
self.gettoken()
|
self.gettoken()
|
||||||
@ -341,15 +330,12 @@ def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
|
|||||||
return a, z
|
return a, z
|
||||||
else:
|
else:
|
||||||
self.raise_error(
|
self.raise_error(
|
||||||
"expected (...) or NAME or STRING, got %s/%s", self.type, self.value
|
f"expected (...) or NAME or STRING, got {self.type}/{self.value}"
|
||||||
)
|
)
|
||||||
raise AssertionError
|
|
||||||
|
|
||||||
def expect(self, type: int, value: Optional[Any] = None) -> str:
|
def expect(self, type: int, value: Optional[Any] = None) -> str:
|
||||||
if self.type != type or (value is not None and self.value != value):
|
if self.type != type or (value is not None and self.value != value):
|
||||||
self.raise_error(
|
self.raise_error(f"expected {type}/{value}, got {self.type}/{self.value}")
|
||||||
"expected %s/%s, got %s/%s", type, value, self.type, self.value
|
|
||||||
)
|
|
||||||
value = self.value
|
value = self.value
|
||||||
self.gettoken()
|
self.gettoken()
|
||||||
return value
|
return value
|
||||||
@ -361,17 +347,14 @@ def gettoken(self) -> None:
|
|||||||
self.type, self.value, self.begin, self.end, self.line = tup
|
self.type, self.value, self.begin, self.end, self.line = tup
|
||||||
# print token.tok_name[self.type], repr(self.value)
|
# print token.tok_name[self.type], repr(self.value)
|
||||||
|
|
||||||
def raise_error(self, msg: str, *args: Any) -> NoReturn:
|
def raise_error(self, msg: str) -> NoReturn:
|
||||||
if args:
|
raise SyntaxError(
|
||||||
try:
|
msg, (str(self.filename), self.end[0], self.end[1], self.line)
|
||||||
msg = msg % args
|
)
|
||||||
except Exception:
|
|
||||||
msg = " ".join([msg] + list(map(str, args)))
|
|
||||||
raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
|
|
||||||
|
|
||||||
|
|
||||||
class NFAState:
|
class NFAState:
|
||||||
arcs: List[Tuple[Optional[str], "NFAState"]]
|
arcs: list[tuple[Optional[str], "NFAState"]]
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self.arcs = [] # list of (label, NFAState) pairs
|
self.arcs = [] # list of (label, NFAState) pairs
|
||||||
@ -383,11 +366,11 @@ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
|
|||||||
|
|
||||||
|
|
||||||
class DFAState:
|
class DFAState:
|
||||||
nfaset: Dict[NFAState, Any]
|
nfaset: dict[NFAState, Any]
|
||||||
isfinal: bool
|
isfinal: bool
|
||||||
arcs: Dict[str, "DFAState"]
|
arcs: dict[str, "DFAState"]
|
||||||
|
|
||||||
def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
|
def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None:
|
||||||
assert isinstance(nfaset, dict)
|
assert isinstance(nfaset, dict)
|
||||||
assert isinstance(next(iter(nfaset)), NFAState)
|
assert isinstance(next(iter(nfaset)), NFAState)
|
||||||
assert isinstance(final, NFAState)
|
assert isinstance(final, NFAState)
|
||||||
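The `make_dfa` hunks above leave the classic subset construction intact; only the annotations move to built-in generics. For readers following the algorithm in the comments, here is a minimal, self-contained sketch of the same closure-then-move idea. Every name in it is invented for illustration and none of it is blib2to3 API:

```python
# Minimal subset-construction sketch (illustrative; not the blib2to3 API).
# States are integers; `arcs` maps a state to (label, target) pairs,
# where a label of None means an epsilon transition.
from collections import deque


def epsilon_closure(state, arcs):
    """All states reachable from `state` via epsilon (None-labeled) arcs."""
    seen = {state}
    todo = deque([state])
    while todo:
        s = todo.popleft()
        for label, target in arcs.get(s, []):
            if label is None and target not in seen:
                seen.add(target)
                todo.append(target)
    return frozenset(seen)


def subset_construction(start, arcs):
    """Map each DFA state (a frozenset of NFA states) to its labeled moves."""
    dfa = {}
    worklist = [epsilon_closure(start, arcs)]
    while worklist:
        current = worklist.pop()
        if current in dfa:
            continue
        moves = {}
        for s in current:
            for label, target in arcs.get(s, []):
                if label is not None:
                    moves.setdefault(label, set()).update(epsilon_closure(target, arcs))
        dfa[current] = {label: frozenset(m) for label, m in moves.items()}
        worklist.extend(n for n in dfa[current].values() if n not in dfa)
    return dfa


# Toy NFA: 0 -eps-> 1, 1 -"a"-> 2
print(subset_construction(0, {0: [(None, 1)], 1: [("a", 2)]}))
```

Like `make_dfa`, the sketch treats a DFA state as the set of NFA states reachable without consuming input, then collects labeled moves per set until no new sets appear.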
src/blib2to3/pgen2/token.py
@@ -1,6 +1,6 @@
 """Token constants (from "token.h")."""

-from typing import Dict, Final
+from typing import Final

 # Taken from Python (r53757) and modified to include some tokens
 #   originally monkeypatched in by pgen2.tokenize
@@ -74,7 +74,7 @@
 NT_OFFSET: Final = 256
 # --end constants--

-tok_name: Final[Dict[int, str]] = {}
+tok_name: Final[dict[int, str]] = {}
 for _name, _value in list(globals().items()):
     if type(_value) is int:
         tok_name[_value] = _name
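The loop above builds `tok_name` by reverse-mapping every integer constant the module defines. The same pattern in isolation, with made-up constants standing in for the real token table (a sketch, not the actual module):

```python
# Reverse-mapping pattern used for tok_name, shown with toy constants.
ENDMARKER = 0
NAME = 1
NUMBER = 2

tok_name: dict[int, str] = {}
# Snapshot globals() with list() so the dict doesn't change mid-iteration.
for _name, _value in list(globals().items()):
    if type(_value) is int:
        tok_name[_value] = _name

print(tok_name)  # {0: 'ENDMARKER', 1: 'NAME', 2: 'NUMBER'}
```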
(File diff suppressed because it is too large.)
src/blib2to3/pytree.py
@@ -12,18 +12,8 @@

 # mypy: allow-untyped-defs, allow-incomplete-defs

-from typing import (
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from collections.abc import Iterable, Iterator
+from typing import Any, Optional, TypeVar, Union

 from blib2to3.pgen2.grammar import Grammar

@@ -34,7 +24,7 @@

 HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max

-_type_reprs: Dict[int, Union[str, int]] = {}
+_type_reprs: dict[int, Union[str, int]] = {}


 def type_repr(type_num: int) -> Union[str, int]:
@@ -57,8 +47,8 @@ def type_repr(type_num: int) -> Union[str, int]:
 _P = TypeVar("_P", bound="Base")

 NL = Union["Node", "Leaf"]
-Context = Tuple[str, Tuple[int, int]]
-RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
+Context = tuple[str, tuple[int, int]]
+RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]


 class Base:
@@ -74,7 +64,7 @@ class Base:
     # Default values for instance variables
     type: int  # int: token number (< 256) or symbol number (>= 256)
     parent: Optional["Node"] = None  # Parent node pointer, or None
-    children: List[NL]  # List of subnodes
+    children: list[NL]  # List of subnodes
     was_changed: bool = False
     was_checked: bool = False

@@ -135,7 +125,7 @@ def pre_order(self) -> Iterator[NL]:
         """
         raise NotImplementedError

-    def replace(self, new: Union[NL, List[NL]]) -> None:
+    def replace(self, new: Union[NL, list[NL]]) -> None:
         """Replace this node with a new one in the parent."""
         assert self.parent is not None, str(self)
         assert new is not None
@@ -242,16 +232,16 @@ def get_suffix(self) -> str:
 class Node(Base):
     """Concrete implementation for interior nodes."""

-    fixers_applied: Optional[List[Any]]
-    used_names: Optional[Set[str]]
+    fixers_applied: Optional[list[Any]]
+    used_names: Optional[set[str]]

     def __init__(
         self,
         type: int,
-        children: List[NL],
+        children: list[NL],
         context: Optional[Any] = None,
         prefix: Optional[str] = None,
-        fixers_applied: Optional[List[Any]] = None,
+        fixers_applied: Optional[list[Any]] = None,
     ) -> None:
         """
         Initializer.
@@ -278,11 +268,7 @@ def __init__(
     def __repr__(self) -> str:
         """Return a canonical string representation."""
         assert self.type is not None
-        return "{}({}, {!r})".format(
-            self.__class__.__name__,
-            type_repr(self.type),
-            self.children,
-        )
+        return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})"

     def __str__(self) -> str:
         """
@@ -363,12 +349,12 @@ def append_child(self, child: NL) -> None:
         self.invalidate_sibling_maps()

     def invalidate_sibling_maps(self) -> None:
-        self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
-        self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
+        self.prev_sibling_map: Optional[dict[int, Optional[NL]]] = None
+        self.next_sibling_map: Optional[dict[int, Optional[NL]]] = None

     def update_sibling_maps(self) -> None:
-        _prev: Dict[int, Optional[NL]] = {}
-        _next: Dict[int, Optional[NL]] = {}
+        _prev: dict[int, Optional[NL]] = {}
+        _next: dict[int, Optional[NL]] = {}
         self.prev_sibling_map = _prev
         self.next_sibling_map = _next
         previous: Optional[NL] = None
@@ -384,11 +370,11 @@ class Leaf(Base):

     # Default values for instance variables
     value: str
-    fixers_applied: List[Any]
+    fixers_applied: list[Any]
     bracket_depth: int
     # Changed later in brackets.py
     opening_bracket: Optional["Leaf"] = None
-    used_names: Optional[Set[str]]
+    used_names: Optional[set[str]]
     _prefix = ""  # Whitespace and comments preceding this token in the input
     lineno: int = 0  # Line where this token starts in the input
     column: int = 0  # Column where this token starts in the input
@@ -403,7 +389,7 @@ def __init__(
         value: str,
         context: Optional[Context] = None,
         prefix: Optional[str] = None,
-        fixers_applied: List[Any] = [],
+        fixers_applied: list[Any] = [],
         opening_bracket: Optional["Leaf"] = None,
         fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
     ) -> None:
@@ -421,7 +407,7 @@ def __init__(
         self.value = value
         if prefix is not None:
             self._prefix = prefix
-        self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
+        self.fixers_applied: Optional[list[Any]] = fixers_applied[:]
         self.children = []
         self.opening_bracket = opening_bracket
         self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
@@ -431,10 +417,9 @@ def __repr__(self) -> str:
         from .pgen2.token import tok_name

         assert self.type is not None
-        return "{}({}, {!r})".format(
-            self.__class__.__name__,
-            tok_name.get(self.type, self.type),
-            self.value,
-        )
+        return (
+            f"{self.__class__.__name__}({tok_name.get(self.type, self.type)},"
+            f" {self.value!r})"
+        )

     def __str__(self) -> str:
@@ -503,7 +488,7 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
     return Leaf(type, value or "", context=context)


-_Results = Dict[str, NL]
+_Results = dict[str, NL]


 class BasePattern:
@@ -537,7 +522,7 @@ def __repr__(self) -> str:
         args = [type_repr(self.type), self.content, self.name]
         while args and args[-1] is None:
             del args[-1]
-        return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args)))
+        return f"{self.__class__.__name__}({', '.join(map(repr, args))})"

     def _submatch(self, node, results=None) -> bool:
         raise NotImplementedError
@@ -576,7 +561,7 @@ def match(self, node: NL, results: Optional[_Results] = None) -> bool:
             results[self.name] = node
         return True

-    def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
+    def match_seq(self, nodes: list[NL], results: Optional[_Results] = None) -> bool:
         """
         Does this pattern exactly match a sequence of nodes?

@@ -586,7 +571,7 @@ def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool
             return False
         return self.match(nodes[0], results)

-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding all matches for this pattern.

@@ -816,7 +801,7 @@ def match_seq(self, nodes, results=None) -> bool:
                 return True
         return False

-    def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding matches for a sequence of nodes.

@@ -861,7 +846,7 @@ def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
             if hasattr(sys, "getrefcount"):
                 sys.stderr = save_stderr

-    def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """Helper to iteratively yield the matches."""
         nodelen = len(nodes)
         if 0 >= self.min:
@@ -890,7 +875,7 @@ def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
                     new_results.append((c0 + c1, r))
             results = new_results

-    def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
+    def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
         """Special optimized matcher for bare_name."""
         count = 0
         r = {}  # type: _Results
@@ -907,7 +892,7 @@ def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
         r[self.name] = nodes[:count]
         return count, r

-    def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
+    def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
         """Helper to recursively yield the matches."""
         assert self.content is not None
         if count >= self.min:
@@ -944,7 +929,7 @@ def match_seq(self, nodes, results=None) -> bool:
         # We only match an empty sequence of nodes in its entirety
         return len(nodes) == 0

-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         if self.content is None:
             # Return a match if there is an empty sequence
             if len(nodes) == 0:
@@ -957,8 +942,8 @@ def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:


 def generate_matches(
-    patterns: List[BasePattern], nodes: List[NL]
-) -> Iterator[Tuple[int, _Results]]:
+    patterns: list[BasePattern], nodes: list[NL]
+) -> Iterator[tuple[int, _Results]]:
     """
     Generator yielding matches for a sequence of patterns and nodes.

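Taken together, the pgen.py, token.py, and pytree.py hunks are one mechanical migration: `typing.Dict/List/Tuple/Set` become the built-in generics standardized by PEP 585 (usable from Python 3.9), and `%`-formatting becomes f-strings. A minimal before/after of the same migration on throwaway example code (nothing below comes from blib2to3):

```python
# Before: typing-module generics and %-formatting.
from typing import Dict, List, Tuple


def summarize_old(scores: Dict[str, List[int]]) -> Tuple[str, int]:
    name, values = max(scores.items(), key=lambda kv: sum(kv[1]))
    print("winner: %s with %d points" % (name, sum(values)))
    return name, sum(values)


# After: built-in generics (PEP 585, Python 3.9+) and f-strings.
def summarize_new(scores: dict[str, list[int]]) -> tuple[str, int]:
    name, values = max(scores.items(), key=lambda kv: sum(kv[1]))
    print(f"winner: {name} with {sum(values)} points")
    return name, sum(values)


summarize_new({"ada": [3, 4], "bob": [1]})  # winner: ada with 7 points
```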
tests/data/cases/annotations.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+# regression test for #1765
+class Foo:
+    def foo(self):
+        if True:
+            content_ids: Mapping[
+                str, Optional[ContentId]
+            ] = self.publisher_content_store.store_config_contents(files)
+
+# output
+
+# regression test for #1765
+class Foo:
+    def foo(self):
+        if True:
+            content_ids: Mapping[str, Optional[ContentId]] = (
+                self.publisher_content_store.store_config_contents(files)
+            )
tests/data/cases/backslash_before_indent.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+# flags: --minimum-version=3.10
+class Plotter:
+    \
+        pass
+
+class AnotherCase:
+    \
+        """Some
+        \
+        Docstring
+        """
+
+# output
+
+class Plotter:
+
+    pass
+
+
+class AnotherCase:
+    """Some
+    \
+    Docstring
+    """
@@ -1,4 +1,3 @@
-# flags: --preview
 # long variable name
 this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
 this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1  # with a comment
@@ -32,7 +31,8 @@
     raise ValueError(err.format(key))
 concatenated_strings = "some strings that are " "concatenated implicitly, so if you put them on separate " "lines it will fit"
 del concatenated_strings, string_variable_name, normal_function_name, normal_name, need_more_to_make_the_line_long_enough
+del ([], name_1, name_2), [(), [], name_4, name_3], name_1[[name_2 for name_1 in name_0]]
+del (),

 # output

@@ -92,3 +92,9 @@
     normal_name,
     need_more_to_make_the_line_long_enough,
 )
+del (
+    ([], name_1, name_2),
+    [(), [], name_4, name_3],
+    name_1[[name_2 for name_1 in name_0]],
+)
+del ((),)
@@ -14,5 +14,7 @@ def bob():  # pylint: disable=W9016
     pass


-def bobtwo():  # some comment here
+def bobtwo():
+
+    # some comment here
     pass
@@ -1,4 +1,3 @@
-# flags: --minimum-version=3.8
 with \
      make_context_manager1() as cm1, \
      make_context_manager2() as cm2, \
@@ -1,4 +1,3 @@
-# flags: --minimum-version=3.9
 with \
      make_context_manager1() as cm1, \
      make_context_manager2() as cm2, \
@@ -85,6 +84,31 @@ async def func():
         pass


+
+# don't remove the brackets here, it changes the meaning of the code.
+with (x, y) as z:
+    pass
+
+
+# don't remove the brackets here, it changes the meaning of the code.
+# even though the code will always trigger a runtime error
+with (name_5, name_4), name_5:
+    pass
+
+
+def test_tuple_as_contextmanager():
+    from contextlib import nullcontext
+
+    try:
+        with (nullcontext(),nullcontext()),nullcontext():
+            pass
+    except TypeError:
+        # test passed
+        pass
+    else:
+        # this should be a type error
+        assert False
+
 # output

@@ -173,3 +197,28 @@ async def func():
         some_other_function(argument1, argument2, argument3="some_value"),
     ):
         pass
+
+
+# don't remove the brackets here, it changes the meaning of the code.
+with (x, y) as z:
+    pass
+
+
+# don't remove the brackets here, it changes the meaning of the code.
+# even though the code will always trigger a runtime error
+with (name_5, name_4), name_5:
+    pass
+
+
+def test_tuple_as_contextmanager():
+    from contextlib import nullcontext
+
+    try:
+        with (nullcontext(), nullcontext()), nullcontext():
+            pass
+    except TypeError:
+        # test passed
+        pass
+    else:
+        # this should be a type error
+        assert False
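These new fixtures pin down why the brackets must stay: `with (x, y) as z:` enters the tuple `(x, y)` as a single context manager, which fails at runtime because tuples define no `__enter__`, while the unbracketed form enters each manager separately. A quick standalone demonstration of the distinction:

```python
from contextlib import nullcontext

# Brackets present: Python treats the tuple (nullcontext(), nullcontext())
# as ONE context manager, and tuples have no __enter__, so this raises.
try:
    with (nullcontext(), nullcontext()), nullcontext():
        pass
except TypeError as exc:
    print("tuple is not a context manager:", exc)

# Brackets absent: two independent context managers; this works fine.
with nullcontext(), nullcontext():
    pass
```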
@@ -1,4 +1,3 @@
-# flags: --minimum-version=3.9
 # This file uses parenthesized context managers introduced in Python 3.9.


@@ -1,4 +1,3 @@
-# flags: --preview
 """
 87 characters ............................................................................
 """
tests/data/cases/fmtonoff6.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+# Regression test for https://github.com/psf/black/issues/2478.
+def foo():
+    arr = (
+        (3833567325051000, 5, 1, 2, 4229.25, 6, 0),
+        # fmt: off
+    )
+
+
+# Regression test for https://github.com/psf/black/issues/3458.
+dependencies = {
+    a: b,
+    # fmt: off
+}
tests/data/cases/fmtskip10.py (new file, 9 lines)
@@ -0,0 +1,9 @@
+# flags: --preview
+def foo(): return "mock"  # fmt: skip
+if True: print("yay")  # fmt: skip
+for i in range(10): print(i)  # fmt: skip
+
+j = 1  # fmt: skip
+while j < 10: j += 1  # fmt: skip
+
+b = [c for c in "A very long string that would normally generate some kind of collapse, since it is this long"]  # fmt: skip
tests/data/cases/fmtskip11.py (new file, 6 lines)
@@ -0,0 +1,6 @@
+def foo():
+    pass
+
+
+# comment 1  # fmt: skip
+# comment 2
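The fmtskip fixtures above exercise `# fmt: skip`, which tells Black to leave the annotated line exactly as written while formatting everything else. A small usage sketch against Black's Python API (assuming `black` is installed; `format_str` and `Mode` are its documented entry points):

```python
import black

# The first line carries `# fmt: skip` and is preserved verbatim;
# the second line is reformatted normally.
src = "x  =  1  # fmt: skip\ny  =  2\n"
print(black.format_str(src, mode=black.Mode()))
# x  =  1  # fmt: skip
# y = 2
```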
@@ -1,4 +1,3 @@
-# flags: --preview
 print () # fmt: skip
 print () # fmt:skip

@@ -156,6 +156,7 @@ def something(self):

 #

+
 #
 pass

@@ -1,4 +1,3 @@
-# flags: --preview
 x = "\x1F"
 x = "\\x1B"
 x = "\\\x1B"
tests/data/cases/fstring_quotations.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+# Regression tests for long f-strings, including examples from issue #3623
+
+a = (
+    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+)
+
+a = (
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+)
+
+a = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + \
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+
+a = f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"' + \
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+
+a = (
+    f'bbbbbbb"{"b"}"'
+    'aaaaaaaa'
+)
+
+a = (
+    f'"{"b"}"'
+)
+
+a = (
+    f'\"{"b"}\"'
+)
+
+a = (
+    r'\"{"b"}\"'
+)
+
+# output
+
+# Regression tests for long f-strings, including examples from issue #3623
+
+a = (
+    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+)
+
+a = (
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+)
+
+a = (
+    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+    + f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+)
+
+a = (
+    f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+    + f'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"{"b"}"'
+)
+
+a = f'bbbbbbb"{"b"}"' "aaaaaaaa"
+
+a = f'"{"b"}"'
+
+a = f'"{"b"}"'
+
+a = r'\"{"b"}\"'
@@ -1,4 +1,4 @@
-# flags: --preview --minimum-version=3.10
+# flags: --minimum-version=3.10
 # normal, short, function definition
 def foo(a, b) -> tuple[int, float]: ...

@@ -142,6 +142,7 @@ def SimplePyFn(
     Buffer[UInt8, 2],
     Buffer[UInt8, 2],
 ]: ...
+
 # output
 # normal, short, function definition
 def foo(a, b) -> tuple[int, float]: ...
@@ -60,6 +60,64 @@ def func() -> ((also_super_long_type_annotation_that_may_cause_an_AST_related_cr
     argument1, (one, two,), argument4, argument5, argument6
 )

+def foo() -> (
+    # comment inside parenthesised return type
+    int
+):
+    ...
+
+def foo() -> (
+    # comment inside parenthesised return type
+    # more
+    int
+    # another
+):
+    ...
+
+def foo() -> (
+    # comment inside parenthesised new union return type
+    int | str | bytes
+):
+    ...
+
+def foo() -> (
+    # comment inside plain tuple
+):
+    pass
+
+def foo(arg: (# comment with non-return annotation
+    int
+    # comment with non-return annotation
+)):
+    pass
+
+def foo(arg: (# comment with non-return annotation
+    int | range | memoryview
+    # comment with non-return annotation
+)):
+    pass
+
+def foo(arg: (# only before
+    int
+)):
+    pass
+
+def foo(arg: (
+    int
+    # only after
+)):
+    pass
+
+variable: (  # annotation
+    because
+    # why not
+)
+
+variable: (
+    because
+    # why not
+)
+
 # output

 def f(
@@ -176,3 +234,75 @@ def func() -> (
     argument5,
     argument6,
 )
+
+
+def foo() -> (
+    # comment inside parenthesised return type
+    int
+): ...
+
+
+def foo() -> (
+    # comment inside parenthesised return type
+    # more
+    int
+    # another
+): ...
+
+
+def foo() -> (
+    # comment inside parenthesised new union return type
+    int
+    | str
+    | bytes
+): ...
+
+
+def foo() -> (
+    # comment inside plain tuple
+):
+    pass
+
+
+def foo(
+    arg: (  # comment with non-return annotation
+        int
+        # comment with non-return annotation
+    ),
+):
+    pass
+
+
+def foo(
+    arg: (  # comment with non-return annotation
+        int
+        | range
+        | memoryview
+        # comment with non-return annotation
+    ),
+):
+    pass
+
+
+def foo(arg: int):  # only before
+    pass
+
+
+def foo(
+    arg: (
+        int
+        # only after
+    ),
+):
+    pass
+
+
+variable: (  # annotation
+    because
+    # why not
+)
+
+variable: (
+    because
+    # why not
+)
tests/data/cases/generics_wrapping.py (new file, 307 lines)
@@ -0,0 +1,307 @@
+# flags: --minimum-version=3.12
+def plain[T, B](a: T, b: T) -> T:
+    return a
+
+def arg_magic[T, B](a: T, b: T,) -> T:
+    return a
+
+def type_param_magic[T, B,](a: T, b: T) -> T:
+    return a
+
+def both_magic[T, B,](a: T, b: T,) -> T:
+    return a
+
+
+def plain_multiline[
+    T,
+    B
+](
+    a: T,
+    b: T
+) -> T:
+    return a
+
+def arg_magic_multiline[
+    T,
+    B
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+def type_param_magic_multiline[
+    T,
+    B,
+](
+    a: T,
+    b: T
+) -> T:
+    return a
+
+def both_magic_multiline[
+    T,
+    B,
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def plain_mixed1[
+    T,
+    B
+](a: T, b: T) -> T:
+    return a
+
+def plain_mixed2[T, B](
+    a: T,
+    b: T
+) -> T:
+    return a
+
+def arg_magic_mixed1[
+    T,
+    B
+](a: T, b: T,) -> T:
+    return a
+
+def arg_magic_mixed2[T, B](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+def type_param_magic_mixed1[
+    T,
+    B,
+](a: T, b: T) -> T:
+    return a
+
+def type_param_magic_mixed2[T, B,](
+    a: T,
+    b: T
+) -> T:
+    return a
+
+def both_magic_mixed1[
+    T,
+    B,
+](a: T, b: T,) -> T:
+    return a
+
+def both_magic_mixed2[T, B,](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+def something_something_function[
+    T: Model
+](param: list[int], other_param: type[T], *, some_other_param: bool = True) -> QuerySet[
+    T
+]:
+    pass
+
+
+def func[A_LOT_OF_GENERIC_TYPES: AreBeingDefinedHere, LIKE_THIS, AND_THIS, ANOTHER_ONE, AND_YET_ANOTHER_ONE: ThisOneHasTyping](a: T, b: T, c: T, d: T, e: T, f: T, g: T, h: T, i: T, j: T, k: T, l: T, m: T, n: T, o: T, p: T) -> T:
+    return a
+
+
+def with_random_comments[
+    Z
+    # bye
+]():
+    return a
+
+
+def func[
+    T, # comment
+    U # comment
+    ,
+    Z: # comment
+    int
+](): pass
+
+
+def func[
+    T, # comment but it's long so it doesn't just move to the end of the line
+    U # comment comment comm comm ent ent
+    ,
+    Z: # comment ent ent comm comm comment
+    int
+](): pass
+
+
+# output
+def plain[T, B](a: T, b: T) -> T:
+    return a
+
+
+def arg_magic[T, B](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def type_param_magic[
+    T,
+    B,
+](
+    a: T, b: T
+) -> T:
+    return a
+
+
+def both_magic[
+    T,
+    B,
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def plain_multiline[T, B](a: T, b: T) -> T:
+    return a
+
+
+def arg_magic_multiline[T, B](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def type_param_magic_multiline[
+    T,
+    B,
+](
+    a: T, b: T
+) -> T:
+    return a
+
+
+def both_magic_multiline[
+    T,
+    B,
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def plain_mixed1[T, B](a: T, b: T) -> T:
+    return a
+
+
+def plain_mixed2[T, B](a: T, b: T) -> T:
+    return a
+
+
+def arg_magic_mixed1[T, B](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def arg_magic_mixed2[T, B](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def type_param_magic_mixed1[
+    T,
+    B,
+](
+    a: T, b: T
+) -> T:
+    return a
+
+
+def type_param_magic_mixed2[
+    T,
+    B,
+](
+    a: T, b: T
+) -> T:
+    return a
+
+
+def both_magic_mixed1[
+    T,
+    B,
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def both_magic_mixed2[
+    T,
+    B,
+](
+    a: T,
+    b: T,
+) -> T:
+    return a
+
+
+def something_something_function[T: Model](
+    param: list[int], other_param: type[T], *, some_other_param: bool = True
+) -> QuerySet[T]:
+    pass
+
+
+def func[
+    A_LOT_OF_GENERIC_TYPES: AreBeingDefinedHere,
+    LIKE_THIS,
+    AND_THIS,
+    ANOTHER_ONE,
+    AND_YET_ANOTHER_ONE: ThisOneHasTyping,
+](
+    a: T,
+    b: T,
+    c: T,
+    d: T,
+    e: T,
+    f: T,
+    g: T,
+    h: T,
+    i: T,
+    j: T,
+    k: T,
+    l: T,
+    m: T,
+    n: T,
+    o: T,
+    p: T,
+) -> T:
+    return a
+
+
+def with_random_comments[
+    Z
+    # bye
+]():
+    return a
+
+
+def func[T, U, Z: int]():  # comment  # comment  # comment
+    pass
+
+
+def func[
+    T,  # comment but it's long so it doesn't just move to the end of the line
+    U,  # comment comment comm comm ent ent
+    Z: int,  # comment ent ent comm comm comment
+]():
+    pass
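generics_wrapping.py exercises PEP 695 type-parameter lists (Python 3.12+), where the `[T, B]` block after a function name declares its type variables inline. The smallest working instance of the syntax these fixtures wrap and unwrap:

```python
# PEP 695 inline type parameters (requires Python 3.12+).
def first[T](items: list[T]) -> T:
    """Return the first element; T is declared right in the signature."""
    return items[0]


print(first([1, 2, 3]))    # 1
print(first(["a", "b"]))   # a
```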
@@ -1,4 +1,3 @@
-# flags: --preview
 m2 = None if not isinstance(dist, Normal) else m** 2 + s * 2
 m3 = None if not isinstance(dist, Normal) else m ** 2 + s * 2
 m4 = None if not isinstance(dist, Normal) else m**2 + s * 2
@@ -1,4 +1,3 @@
-# flags: --preview
 def func(
     arg1,
     arg2,
@@ -1,7 +1,6 @@
-# flags: --preview
 """I am a very helpful module docstring.

-With trailing spaces (only removed with unify_docstring_detection on):
+With trailing spaces:
     Lorem ipsum dolor sit amet, consectetur adipiscing elit,
     sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
     Ut enim ad minim veniam,
@@ -39,7 +38,7 @@
 # output
 """I am a very helpful module docstring.

-With trailing spaces (only removed with unify_docstring_detection on):
+With trailing spaces:
     Lorem ipsum dolor sit amet, consectetur adipiscing elit,
     sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
     Ut enim ad minim veniam,
@@ -62,5 +62,4 @@ class MultilineDocstringsAsWell:


 class SingleQuotedDocstring:
-
     "I'm a docstring but I don't even get triple quotes."
@@ -1,4 +1,4 @@
-# flags: --preview --minimum-version=3.10
+# flags: --minimum-version=3.10
 match match:
     case "test" if case != "not very loooooooooooooog condition":  # comment
         pass
tests/data/cases/pep646_typed_star_arg_type_var_tuple.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+# flags: --minimum-version=3.11
+
+
+def fn(*args: *tuple[*A, B]) -> None:
+    pass
+
+
+fn.__annotations__
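The PEP 646 fixture above relies on unpacking a `TypeVarTuple` inside a `*args` annotation, which is valid syntax from Python 3.11. A standalone sketch of the feature (the `tail` helper is invented for illustration):

```python
# PEP 646 variadic generics (requires Python 3.11+).
from typing import TypeVar, TypeVarTuple

T = TypeVar("T")
Ts = TypeVarTuple("Ts")


def tail(*args: *tuple[T, *Ts]) -> tuple[*Ts]:
    """Drop the first positional argument, keeping the rest precisely typed."""
    return args[1:]


print(tail(1, "a", 2.0))  # ('a', 2.0)
```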
@@ -1,4 +1,3 @@
-# flags: --minimum-version=3.8
 def positional_only_arg(a, /):
     pass


@@ -1,4 +1,3 @@
-# flags: --minimum-version=3.8
 (a := 1)
 (a := a)
 if (match := pattern.search(data)) is None:
Some files were not shown because too many files have changed in this diff.