Draft for Black 2023 stable style (#3418)

parent 226cbf0226
commit c4bd2e31ce

CHANGES.md (29 lines changed)
@@ -10,6 +10,35 @@

 <!-- Changes that affect Black's stable style -->

+- Introduce the 2023 stable style, which incorporates most aspects of last year's
+  preview style (#3418). Specific changes:
+  - Enforce empty lines before classes and functions with sticky leading comments
+    (#3302) (22.12.0)
+  - Reformat empty and whitespace-only files as either an empty file (if no newline is
+    present) or as a single newline character (if a newline is present) (#3348)
+    (22.12.0)
+  - Implicitly concatenated strings used as function args are now wrapped inside
+    parentheses (#3307) (22.12.0)
+  - Correctly handle trailing commas that are inside a line's leading non-nested parens
+    (#3370) (22.12.0)
+  - `--skip-string-normalization` / `-S` now prevents docstring prefixes from being
+    normalized as expected (#3168) (since 22.8.0)
+  - When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from
+    subscript expressions with more than 1 element (#3209) (22.8.0)
+  - Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside
+    parentheses (#3162) (22.8.0)
+  - Fix a string merging/split issue when a comment is present in the middle of
+    implicitly concatenated strings on its own line (#3227) (22.8.0)
+  - Docstring quotes are no longer moved if it would violate the line length limit
+    (#3044, #3430) (22.6.0)
+  - Parentheses around return annotations are now managed (#2990) (22.6.0)
+  - Remove unnecessary parentheses around awaited objects (#2991) (22.6.0)
+  - Remove unnecessary parentheses in `with` statements (#2926) (22.6.0)
+  - Remove trailing newlines after code block open (#3035) (22.6.0)
+  - Code cell separators `#%%` are now standardised to `# %%` (#2919) (22.3.0)
+  - Remove unnecessary parentheses from `except` statements (#2939) (22.3.0)
+  - Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) (22.3.0)
+  - Avoid magic-trailing-comma in single-element subscripts (#2942) (22.3.0)
 - Fix a crash when a colon line is marked between `# fmt: off` and `# fmt: on` (#3439)

 ### Preview style
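Illustration only, not part of this commit: the promoted style can be observed through Black's public `format_str` API. The input mirrors the return-annotation example in the documentation changes further down; per that example, the redundant parentheses around `int` are expected to disappear.

```python
import black

src = "def foo() -> (int):\n    ...\n"

# Default Mode() now applies the 2023 stable style; no preview flag is needed.
print(black.format_str(src, mode=black.Mode()))
# Expected, per the documented example: "def foo() -> int:"
```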
@@ -194,7 +194,45 @@ that in-function vertical whitespace should only be used sparingly.

 _Black_ will allow single empty lines inside functions, and single and double empty
 lines on module level left by the original editors, except when they're within
 parenthesized expressions. Since such expressions are always reformatted to fit minimal
-space, this whitespace is lost.
+space, this whitespace is lost. The other exception is that it will remove any empty
+lines immediately following a statement that introduces a new indentation level.
+
+```python
+# in:
+
+def foo():
+
+    print("All the newlines above me should be deleted!")
+
+
+if condition:
+
+    print("No newline above me!")
+
+    print("There is a newline above me, and that's OK!")
+
+
+class Point:
+
+    x: int
+    y: int
+
+# out:
+
+def foo():
+    print("All the newlines above me should be deleted!")
+
+
+if condition:
+    print("No newline above me!")
+
+    print("There is a newline above me, and that's OK!")
+
+
+class Point:
+    x: int
+    y: int
+```

 It will also insert proper spacing before and after function definitions. It's one line
 before and after inner functions and two lines before and after module-level functions
@@ -62,93 +62,3 @@ plain strings. User-made splits are respected when they do not exceed the line l
 limit. Line continuation backslashes are converted into parenthesized strings.
 Unnecessary parentheses are stripped. The stability and status of this feature is
 tracked in [this issue](https://github.com/psf/black/issues/2188).
-
-### Improved empty line management
-
-1. _Black_ will remove newlines in the beginning of new code blocks, i.e. when the
-   indentation level is increased. For example:
-
-   ```python
-   def my_func():
-
-       print("The line above me will be deleted!")
-   ```
-
-   will be changed to:
-
-   ```python
-   def my_func():
-       print("The line above me will be deleted!")
-   ```
-
-   This new feature will be applied to **all code blocks**: `def`, `class`, `if`,
-   `for`, `while`, `with`, `case` and `match`.
-
-2. _Black_ will enforce empty lines before classes and functions with leading comments.
-   For example:
-
-   ```python
-   some_var = 1
-   # Leading sticky comment
-   def my_func():
-       ...
-   ```
-
-   will be changed to:
-
-   ```python
-   some_var = 1
-
-
-   # Leading sticky comment
-   def my_func():
-       ...
-   ```
-
-### Improved parentheses management
-
-_Black_ will format parentheses around return annotations similarly to other sets of
-parentheses. For example:
-
-```python
-def foo() -> (int):
-    ...
-
-def foo() -> looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong:
-    ...
-```
-
-will be changed to:
-
-```python
-def foo() -> int:
-    ...
-
-
-def foo() -> (
-    looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
-):
-    ...
-```
-
-And, extra parentheses in `await` expressions and `with` statements are removed. For
-example:
-
-```python
-with ((open("bla.txt")) as f, open("x")):
-    ...
-
-async def main():
-    await (asyncio.sleep(1))
-```
-
-will be changed to:
-
-```python
-with open("bla.txt") as f, open("x"):
-    ...
-
-
-async def main():
-    await asyncio.sleep(1)
-```
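A further illustrative sketch (assuming only the public `black.format_str` / `black.Mode` API): the `with`/`await` parenthesis removal that is being moved out of the preview docs above can be reproduced directly. Per the "will be changed to" block being removed, the expected result drops one set of parentheses in the `with` statement and the parentheses around the awaited call.

```python
import black

# Input mirrors the "before" example from the promoted preview-docs section above.
src = (
    'with ((open("bla.txt")) as f, open("x")):\n'
    "    ...\n"
    "\n"
    "\n"
    "async def main():\n"
    "    await (asyncio.sleep(1))\n"
)

print(black.format_str(src, mode=black.Mode()))
```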
@@ -925,9 +925,6 @@ def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileCo
     valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
     `mode` is passed to :func:`format_str`.
     """
-    if not mode.preview and not src_contents.strip():
-        raise NothingChanged
-
     if mode.is_ipynb:
         dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
     else:
@@ -1022,7 +1019,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileCon
     Operate cell-by-cell, only on code cells, only for Python notebooks.
     If the ``.ipynb`` originally had a trailing newline, it'll be preserved.
     """
-    if mode.preview and not src_contents:
+    if not src_contents:
         raise NothingChanged

     trailing_newline = src_contents[-1] == "\n"
@@ -1101,7 +1098,7 @@ def _format_str_once(src_contents: str, *, mode: Mode) -> str:
         for feature in {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
         if supports_feature(versions, feature)
     }
-    normalize_fmt_off(src_node, preview=mode.preview)
+    normalize_fmt_off(src_node)
     lines = LineGenerator(mode=mode, features=context_manager_features)
     elt = EmptyLineTracker(mode=mode)
     split_line_features = {
@@ -1122,7 +1119,7 @@ def _format_str_once(src_contents: str, *, mode: Mode) -> str:
     dst_contents = []
     for block in dst_blocks:
         dst_contents.extend(block.all_lines())
-    if mode.preview and not dst_contents:
+    if not dst_contents:
         # Use decode_bytes to retrieve the correct source newline (CRLF or LF),
         # and check if normalized_content has more than one line
         normalized_content, _, newline = decode_bytes(src_contents.encode("utf-8"))
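A sketch of the new empty-file behaviour using Black's module-level helpers. `format_file_contents` is the function shown in the hunk above; `NothingChanged` is assumed to be importable from `black` as in recent releases. Illustration only.

```python
import black

mode = black.Mode()

# A whitespace-only file now gets normalised (here: to a single newline),
# per the "reformat empty and whitespace-only files" entry above.
print(repr(black.format_file_contents("   \n", fast=True, mode=mode)))

# A file that is already empty raises NothingChanged instead.
try:
    black.format_file_contents("", fast=True, mode=mode)
except black.NothingChanged:
    print("already formatted")
```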
@@ -29,7 +29,7 @@
 FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP}
 FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"}

-COMMENT_EXCEPTIONS = {True: " !:#'", False: " !:#'%"}
+COMMENT_EXCEPTIONS = " !:#'"


 @dataclass
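Illustrative sketch only: the user-visible effect of the single `COMMENT_EXCEPTIONS` set is that a space is inserted after `#` unless the comment starts with one of the exception characters, as described in `make_comment`'s docstring further down in this diff.

```python
import black

src = (
    "#comment gets a space inserted after the hash\n"
    "#!comments starting with an exception character are left alone\n"
    "x = 1\n"
)
print(black.format_str(src, mode=black.Mode()))
```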
@@ -50,7 +50,7 @@ class ProtoComment:
     consumed: int # how many characters of the original leaf's prefix did we consume


-def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]:
+def generate_comments(leaf: LN) -> Iterator[Leaf]:
     """Clean the prefix of the `leaf` and generate comments from it, if any.

     Comments in lib2to3 are shoved into the whitespace prefix. This happens
@@ -69,16 +69,12 @@ def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]:
     Inline comments are emitted as regular token.COMMENT leaves. Standalone
     are emitted with a fake STANDALONE_COMMENT token identifier.
     """
-    for pc in list_comments(
-        leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, preview=preview
-    ):
+    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
         yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)


 @lru_cache(maxsize=4096)
-def list_comments(
-    prefix: str, *, is_endmarker: bool, preview: bool
-) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
     """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
     result: List[ProtoComment] = []
     if not prefix or "#" not in prefix:
@@ -104,7 +100,7 @@ def list_comments(
             comment_type = token.COMMENT # simple trailing comment
         else:
             comment_type = STANDALONE_COMMENT
-        comment = make_comment(line, preview=preview)
+        comment = make_comment(line)
         result.append(
             ProtoComment(
                 type=comment_type, value=comment, newlines=nlines, consumed=consumed
@@ -114,7 +110,7 @@ def list_comments(
     return result


-def make_comment(content: str, *, preview: bool) -> str:
+def make_comment(content: str) -> str:
     """Return a consistently formatted comment from the given `content` string.

     All comments (except for "##", "#!", "#:", '#'") should have a single
@@ -135,26 +131,26 @@ def make_comment(content: str, *, preview: bool) -> str:
         and not content.lstrip().startswith("type:")
     ):
         content = " " + content[1:] # Replace NBSP by a simple space
-    if content and content[0] not in COMMENT_EXCEPTIONS[preview]:
+    if content and content[0] not in COMMENT_EXCEPTIONS:
         content = " " + content
     return "#" + content


-def normalize_fmt_off(node: Node, *, preview: bool) -> None:
+def normalize_fmt_off(node: Node) -> None:
     """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
     try_again = True
     while try_again:
-        try_again = convert_one_fmt_off_pair(node, preview=preview)
+        try_again = convert_one_fmt_off_pair(node)


-def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool:
+def convert_one_fmt_off_pair(node: Node) -> bool:
     """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.

     Returns True if a pair was converted.
     """
     for leaf in node.leaves():
         previous_consumed = 0
-        for comment in list_comments(leaf.prefix, is_endmarker=False, preview=preview):
+        for comment in list_comments(leaf.prefix, is_endmarker=False):
             if comment.value not in FMT_PASS:
                 previous_consumed = comment.consumed
                 continue
@@ -169,7 +165,7 @@ def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool:
                     if comment.value in FMT_SKIP and prev.type in WHITESPACE:
                         continue

-            ignored_nodes = list(generate_ignored_nodes(leaf, comment, preview=preview))
+            ignored_nodes = list(generate_ignored_nodes(leaf, comment))
             if not ignored_nodes:
                 continue
@@ -214,26 +210,24 @@ def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool:
     return False


-def generate_ignored_nodes(
-    leaf: Leaf, comment: ProtoComment, *, preview: bool
-) -> Iterator[LN]:
+def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
     """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.

     If comment is skip, returns leaf only.
     Stops at the end of the block.
     """
     if comment.value in FMT_SKIP:
-        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, preview=preview)
+        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
         return
     container: Optional[LN] = container_of(leaf)
     while container is not None and container.type != token.ENDMARKER:
-        if is_fmt_on(container, preview=preview):
+        if is_fmt_on(container):
             return

         # fix for fmt: on in children
-        if children_contains_fmt_on(container, preview=preview):
+        if children_contains_fmt_on(container):
             for index, child in enumerate(container.children):
-                if isinstance(child, Leaf) and is_fmt_on(child, preview=preview):
+                if isinstance(child, Leaf) and is_fmt_on(child):
                     if child.type in CLOSING_BRACKETS:
                         # This means `# fmt: on` is placed at a different bracket level
                         # than `# fmt: off`. This is an invalid use, but as a courtesy,
@@ -244,14 +238,12 @@ def generate_ignored_nodes(
                 if (
                     child.type == token.INDENT
                     and index < len(container.children) - 1
-                    and children_contains_fmt_on(
-                        container.children[index + 1], preview=preview
-                    )
+                    and children_contains_fmt_on(container.children[index + 1])
                 ):
                     # This means `# fmt: on` is placed right after an indentation
                     # level, and we shouldn't swallow the previous INDENT token.
                     return
-                if children_contains_fmt_on(child, preview=preview):
+                if children_contains_fmt_on(child):
                     return
                 yield child
             else:
@@ -264,14 +256,14 @@ def generate_ignored_nodes(


 def _generate_ignored_nodes_from_fmt_skip(
-    leaf: Leaf, comment: ProtoComment, *, preview: bool
+    leaf: Leaf, comment: ProtoComment
 ) -> Iterator[LN]:
     """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
     prev_sibling = leaf.prev_sibling
     parent = leaf.parent
     # Need to properly format the leaf prefix to compare it to comment.value,
     # which is also formatted
-    comments = list_comments(leaf.prefix, is_endmarker=False, preview=preview)
+    comments = list_comments(leaf.prefix, is_endmarker=False)
     if not comments or comment.value != comments[0].value:
         return
     if prev_sibling is not None:
@@ -305,12 +297,12 @@ def _generate_ignored_nodes_from_fmt_skip(
         yield from iter(ignored_nodes)


-def is_fmt_on(container: LN, preview: bool) -> bool:
+def is_fmt_on(container: LN) -> bool:
     """Determine whether formatting is switched on within a container.
     Determined by whether the last `# fmt:` comment is `on` or `off`.
     """
     fmt_on = False
-    for comment in list_comments(container.prefix, is_endmarker=False, preview=preview):
+    for comment in list_comments(container.prefix, is_endmarker=False):
         if comment.value in FMT_ON:
             fmt_on = True
         elif comment.value in FMT_OFF:
@@ -318,11 +310,11 @@ def is_fmt_on(container: LN, preview: bool) -> bool:
     return fmt_on


-def children_contains_fmt_on(container: LN, *, preview: bool) -> bool:
+def children_contains_fmt_on(container: LN) -> bool:
     """Determine if children have formatting switched on."""
     for child in container.children:
         leaf = first_leaf_of(child)
-        if leaf is not None and is_fmt_on(leaf, preview=preview):
+        if leaf is not None and is_fmt_on(leaf):
             return True

     return False
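The functions above implement the `# fmt: off` / `# fmt: on` handling, now without any `preview` plumbing. A small usage sketch of the user-visible behaviour (illustration only):

```python
import black

src = (
    "# fmt: off\n"
    "custom_layout = [\n"
    "    1,  2,\n"
    "    3,  4,\n"
    "]\n"
    "# fmt: on\n"
    "other = [1,2]\n"
)

# The region between the markers is left untouched; `other` is reformatted.
print(black.format_str(src, mode=black.Mode()))
```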
@@ -117,7 +117,7 @@ def visit_default(self, node: LN) -> Iterator[Line]:
         """Default `visit_*()` implementation. Recurses to children of `node`."""
         if isinstance(node, Leaf):
             any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
-            for comment in generate_comments(node, preview=self.mode.preview):
+            for comment in generate_comments(node):
                 if any_open_brackets:
                     # any comment within brackets is subject to splitting
                     self.current_line.append(comment)
@@ -221,30 +221,27 @@ def visit_dictsetmaker(self, node: Node) -> Iterator[Line]:

     def visit_funcdef(self, node: Node) -> Iterator[Line]:
         """Visit function definition."""
-        if Preview.annotation_parens not in self.mode:
-            yield from self.visit_stmt(node, keywords={"def"}, parens=set())
-        else:
-            yield from self.line()
-
-            # Remove redundant brackets around return type annotation.
-            is_return_annotation = False
-            for child in node.children:
-                if child.type == token.RARROW:
-                    is_return_annotation = True
-                elif is_return_annotation:
-                    if child.type == syms.atom and child.children[0].type == token.LPAR:
-                        if maybe_make_parens_invisible_in_atom(
-                            child,
-                            parent=node,
-                            remove_brackets_around_comma=False,
-                        ):
-                            wrap_in_parentheses(node, child, visible=False)
-                    else:
-                        wrap_in_parentheses(node, child, visible=False)
-                    is_return_annotation = False
-
-            for child in node.children:
-                yield from self.visit(child)
+        yield from self.line()
+
+        # Remove redundant brackets around return type annotation.
+        is_return_annotation = False
+        for child in node.children:
+            if child.type == token.RARROW:
+                is_return_annotation = True
+            elif is_return_annotation:
+                if child.type == syms.atom and child.children[0].type == token.LPAR:
+                    if maybe_make_parens_invisible_in_atom(
+                        child,
+                        parent=node,
+                        remove_brackets_around_comma=False,
+                    ):
+                        wrap_in_parentheses(node, child, visible=False)
+                else:
+                    wrap_in_parentheses(node, child, visible=False)
+                is_return_annotation = False
+
+        for child in node.children:
+            yield from self.visit(child)

     def visit_match_case(self, node: Node) -> Iterator[Line]:
         """Visit either a match or case statement."""
@@ -332,8 +329,7 @@ def visit_power(self, node: Node) -> Iterator[Line]:
             ):
                 wrap_in_parentheses(node, leaf)

-        if Preview.remove_redundant_parens in self.mode:
-            remove_await_parens(node)
+        remove_await_parens(node)

         yield from self.visit_default(node)

@@ -375,24 +371,17 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
         if is_docstring(leaf) and "\\\n" not in leaf.value:
             # We're ignoring docstrings with backslash newline escapes because changing
             # indentation of those changes the AST representation of the code.
-            if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode:
-                # There was a bug where --skip-string-normalization wouldn't stop us
-                # from normalizing docstring prefixes. To maintain stability, we can
-                # only address this buggy behaviour while the preview style is enabled.
-                if self.mode.string_normalization:
-                    docstring = normalize_string_prefix(leaf.value)
-                    # visit_default() does handle string normalization for us, but
-                    # since this method acts differently depending on quote style (ex.
-                    # see padding logic below), there's a possibility for unstable
-                    # formatting as visit_default() is called *after*. To avoid a
-                    # situation where this function formats a docstring differently on
-                    # the second pass, normalize it early.
-                    docstring = normalize_string_quotes(docstring)
-                else:
-                    docstring = leaf.value
-            else:
-                # ... otherwise, we'll keep the buggy behaviour >.<
-                docstring = normalize_string_prefix(leaf.value)
+            if self.mode.string_normalization:
+                docstring = normalize_string_prefix(leaf.value)
+                # visit_default() does handle string normalization for us, but
+                # since this method acts differently depending on quote style (ex.
+                # see padding logic below), there's a possibility for unstable
+                # formatting as visit_default() is called *after*. To avoid a
+                # situation where this function formats a docstring differently on
+                # the second pass, normalize it early.
+                docstring = normalize_string_quotes(docstring)
+            else:
+                docstring = leaf.value
             prefix = get_string_prefix(docstring)
             docstring = docstring[len(prefix) :] # Remove the prefix
             quote_char = docstring[0]
@@ -432,7 +421,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
             quote = quote_char * quote_len

             # It's invalid to put closing single-character quotes on a new line.
-            if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3:
+            if self.mode and quote_len == 3:
                 # We need to find the length of the last line of the docstring
                 # to find if we can add the closing quotes to the line without
                 # exceeding the maximum line length.
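Sketch of the now-stable `--skip-string-normalization` fix (#3168) handled by the code above: with `string_normalization=False`, the docstring prefix is expected to be left untouched. Illustration only, not part of the diff.

```python
import black

src = 'def f():\n    R"""Raw docstring."""\n'

# string_normalization=False corresponds to --skip-string-normalization / -S;
# the R prefix on the docstring should stay as written.
print(black.format_str(src, mode=black.Mode(string_normalization=False)))
```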
@@ -473,14 +462,8 @@ def __post_init__(self) -> None:
         self.visit_try_stmt = partial(
             v, keywords={"try", "except", "else", "finally"}, parens=Ø
         )
-        if self.mode.preview:
-            self.visit_except_clause = partial(
-                v, keywords={"except"}, parens={"except"}
-            )
-            self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
-        else:
-            self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
-            self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+        self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"})
+        self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
         self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
         self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
         self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
@@ -932,10 +915,7 @@ def bracket_split_build_line(
             break

     leaves_to_track: Set[LeafID] = set()
-    if (
-        Preview.handle_trailing_commas_in_head in original.mode
-        and component is _BracketSplitComponent.head
-    ):
+    if component is _BracketSplitComponent.head:
         leaves_to_track = get_leaves_inside_matching_brackets(leaves)
     # Populate the line
     for leaf in leaves:
@@ -1109,7 +1089,7 @@ def normalize_invisible_parens(
     Standardizes on visible parentheses for single-element tuples, and keeps
     existing visible parentheses for other tuples and generator expressions.
     """
-    for pc in list_comments(node.prefix, is_endmarker=False, preview=mode.preview):
+    for pc in list_comments(node.prefix, is_endmarker=False):
         if pc.value in FMT_OFF:
             # This `node` has a prefix with `# fmt: off`, don't mess with parens.
             return
@@ -1139,8 +1119,7 @@ def normalize_invisible_parens(

         if check_lpar:
             if (
-                mode.preview
-                and child.type == syms.atom
+                child.type == syms.atom
                 and node.type == syms.for_stmt
                 and isinstance(child.prev_sibling, Leaf)
                 and child.prev_sibling.type == token.NAME
@@ -1152,9 +1131,7 @@ def normalize_invisible_parens(
                     remove_brackets_around_comma=True,
                 ):
                     wrap_in_parentheses(node, child, visible=False)
-            elif (
-                mode.preview and isinstance(child, Node) and node.type == syms.with_stmt
-            ):
+            elif isinstance(child, Node) and node.type == syms.with_stmt:
                 remove_with_parens(child, node)
             elif child.type == syms.atom:
                 if maybe_make_parens_invisible_in_atom(
@@ -1180,7 +1157,7 @@ def normalize_invisible_parens(
             elif not (isinstance(child, Leaf) and is_multiline_string(child)):
                 wrap_in_parentheses(node, child, visible=False)

-        comma_check = child.type == token.COMMA if mode.preview else False
+        comma_check = child.type == token.COMMA

         check_lpar = isinstance(child, Leaf) and (
             child.value in parens_after or comma_check
@@ -14,7 +14,7 @@
 )

 from black.brackets import DOT_PRIORITY, BracketTracker
-from black.mode import Mode, Preview
+from black.mode import Mode
 from black.nodes import (
     BRACKETS,
     CLOSING_BRACKETS,
@@ -275,8 +275,7 @@ def has_magic_trailing_comma(
         - it's not a single-element subscript
         Additionally, if ensure_removable:
         - it's not from square bracket indexing
-        (specifically, single-element square bracket indexing with
-        Preview.skip_magic_trailing_comma_in_subscript)
+        (specifically, single-element square bracket indexing)
         """
         if not (
             closing.type in CLOSING_BRACKETS
@@ -290,8 +289,7 @@ def has_magic_trailing_comma(

         if closing.type == token.RSQB:
             if (
-                Preview.one_element_subscript in self.mode
-                and closing.parent
+                closing.parent
                 and closing.parent.type == syms.trailer
                 and closing.opening_bracket
                 and is_one_sequence_between(
@@ -309,18 +307,16 @@ def has_magic_trailing_comma(
             comma = self.leaves[-1]
             if comma.parent is None:
                 return False
-            if Preview.skip_magic_trailing_comma_in_subscript in self.mode:
-                return (
-                    comma.parent.type != syms.subscriptlist
-                    or closing.opening_bracket is None
-                    or not is_one_sequence_between(
-                        closing.opening_bracket,
-                        closing,
-                        self.leaves,
-                        brackets=(token.LSQB, token.RSQB),
-                    )
-                )
-            return comma.parent.type == syms.listmaker
+            return (
+                comma.parent.type != syms.subscriptlist
+                or closing.opening_bracket is None
+                or not is_one_sequence_between(
+                    closing.opening_bracket,
+                    closing,
+                    self.leaves,
+                    brackets=(token.LSQB, token.RSQB),
+                )
+            )

         if self.is_import:
             return True
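Illustrative sketch of the promoted `--skip-magic-trailing-comma` behaviour for subscripts (#3209) that the method above now applies unconditionally. The expected output matches the updated expected-diff fixture referenced further down (`tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff`).

```python
import black

src = "tuple[str, int, float, dict[str, int],]\n"

# magic_trailing_comma=False corresponds to --skip-magic-trailing-comma / -C.
# With more than one element in the subscript, the trailing comma is dropped.
print(black.format_str(src, mode=black.Mode(magic_trailing_comma=False)))
```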
@@ -592,11 +588,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
         ):
             return before, 1

-        if (
-            Preview.remove_block_trailing_newline in current_line.mode
-            and self.previous_line
-            and self.previous_line.opens_block
-        ):
+        if self.previous_line and self.previous_line.opens_block:
             return 0, 0
         return before, 0

@@ -629,9 +621,7 @@ def _maybe_empty_lines_for_class_or_def(
         ):
             slc = self.semantic_leading_comment
             if (
-                Preview.empty_lines_before_class_or_def_with_leading_comments
-                in current_line.mode
-                and slc is not None
+                slc is not None
                 and slc.previous_block is not None
                 and not slc.previous_block.original_line.is_class
                 and not slc.previous_block.original_line.opens_block
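Illustrative sketch of the sticky-leading-comment rule (#3302) that the code above now applies unconditionally; the input/output pair mirrors the example removed from the preview-style docs earlier in this diff.

```python
import black

src = (
    "some_var = 1\n"
    "# Leading sticky comment\n"
    "def my_func():\n"
    "    ...\n"
)

# Two empty lines are now enforced before the comment+def block.
print(black.format_str(src, mode=black.Mode()))
```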
@@ -154,15 +154,7 @@ class Preview(Enum):
     """Individual preview style features."""

     hex_codes_in_unicode_sequences = auto()
-    annotation_parens = auto()
-    empty_lines_before_class_or_def_with_leading_comments = auto()
-    handle_trailing_commas_in_head = auto()
-    long_docstring_quotes_on_newline = auto()
-    normalize_docstring_quotes_and_prefixes_properly = auto()
-    one_element_subscript = auto()
     prefer_splitting_right_hand_side_of_assignments = auto()
-    remove_block_trailing_newline = auto()
-    remove_redundant_parens = auto()
     # NOTE: string_processing requires wrap_long_dict_values_in_parens
     # for https://github.com/psf/black/issues/3117 to be fixed.
     string_processing = auto()
@@ -144,8 +144,9 @@
 -tuple[
 -    str, int, float, dict[str, int]
 -]
+-tuple[str, int, float, dict[str, int],]
++tuple[str, int, float, dict[str, int]]
 +tuple[str, int, float, dict[str, int]]
- tuple[str, int, float, dict[str, int],]
  very_long_variable_name_filters: t.List[
      t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]],
  ]
tests/data/preview/return_annotation_brackets_string.py (new file, 12 lines)
@@ -0,0 +1,12 @@
+# Long string example
+def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]":
+    pass
+
+# output
+
+# Long string example
+def frobnicate() -> (
+    "ThisIsTrulyUnreasonablyExtremelyLongClassName |"
+    " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]"
+):
+    pass
@@ -19,3 +19,27 @@
     CtxManager3() as example3,
 ):
     ...
+
+# output
+
+with CtxManager() as example:
+    ...
+
+with CtxManager1(), CtxManager2():
+    ...
+
+with CtxManager1() as example, CtxManager2():
+    ...
+
+with CtxManager1(), CtxManager2() as example:
+    ...
+
+with CtxManager1() as example1, CtxManager2() as example2:
+    ...
+
+with (
+    CtxManager1() as example1,
+    CtxManager2() as example2,
+    CtxManager3() as example3,
+):
+    ...
@@ -76,7 +76,7 @@
 except:
     try:
         raise TypeError(int)
-    except* (Exception):
+    except* Exception:
         pass
     1 / 0
 except Exception as e:
@@ -62,7 +62,6 @@ async def await_the_walrus():
     with (x := await a, y := await b):
         pass

-    # Ideally we should remove one set of parentheses
     with ((x := await a, y := await b)):
         pass

@@ -137,8 +136,7 @@ async def await_the_walrus():
     with (x := await a, y := await b):
         pass

-    # Ideally we should remove one set of parentheses
-    with ((x := await a, y := await b)):
+    with (x := await a, y := await b):
         pass

     with (x := await a), (y := await b):
@@ -226,6 +226,7 @@ def _init_host(self, parsed) -> None:
 add_compiler(compilers[(7.0, 32)])
 # add_compiler(compilers[(7.1, 64)])

+
 # Comment before function.
 def inline_comments_in_brackets_ruin_everything():
     if typedargslist:
@@ -1,6 +1,7 @@
 # The percent-percent comments are Spyder IDE cells.

-#%%
+
+# %%
 def func():
     x = """
     a really long string
@@ -44,4 +45,4 @@ def func():
     )


-#%%
+# %%
@@ -62,6 +62,8 @@ def decorated1():
 # Preview.empty_lines_before_class_or_def_with_leading_comments.
 # In the current style, the user will have to split those lines by hand.
 some_instruction
+
+
 # This comment should be split from `some_instruction` by two lines but isn't.
 def g():
     ...
@@ -119,7 +119,6 @@ def f():
     if not prev:
         prevp = preceding_leaf(p)
         if not prevp or prevp.type in OPENING_BRACKETS:
-
             return NO

         if prevp.type == token.EQUAL:
@@ -205,6 +205,7 @@ def single_literal_yapf_disable():

 # Comment 2

+
 # fmt: off
 def func_no_args():
   a; b; c
@@ -116,9 +116,9 @@ def f(
     pass


-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
-    "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
+    Set["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"]
+):
     json = {
         "k": {
             "k2": {
@@ -140,9 +140,7 @@ def some_function_with_a_really_long_name() -> (

 def some_method_with_a_really_long_name(
     very_long_parameter_so_yeah: str, another_long_parameter: int
-) -> (
-    another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not
-):
+) -> another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not:
     pass


@@ -155,10 +153,8 @@ def func() -> (


 def func() -> (
-    (
-        also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(
-            this_shouldn_t_get_a_trailing_comma_too
-        )
-    )
+    also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(
+        this_shouldn_t_get_a_trailing_comma_too
+    )
 ):
     pass
@@ -87,10 +87,6 @@ def foo() -> tuple[loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
 def foo() -> tuple[int, int, int,]:
     return 2

-# Long string example
-def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]":
-    pass
-
 # output
 # Control
 def double(a: int) -> int:
@@ -212,11 +208,3 @@ def foo() -> (
     ]
 ):
     return 2
-
-
-# Long string example
-def frobnicate() -> (
-    "ThisIsTrulyUnreasonablyExtremelyLongClassName |"
-    " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]"
-):
-    pass
@@ -419,7 +419,8 @@ def test_skip_magic_trailing_comma(self) -> None:
         msg = (
             "Expected diff isn't equal to the actual. If you made changes to"
             " expression.py and this is an anticipated difference, overwrite"
-            f" tests/data/expression_skip_magic_trailing_comma.diff with {dump}"
+            " tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff"
+            f" with {dump}"
         )
         self.assertEqual(expected, actual, msg)

@@ -32,31 +32,15 @@ def check_file(
 @pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning")
 @pytest.mark.parametrize("filename", all_data_cases("simple_cases"))
 def test_simple_format(filename: str) -> None:
-    check_file("simple_cases", filename, DEFAULT_MODE)
+    magic_trailing_comma = filename != "skip_magic_trailing_comma"
+    check_file(
+        "simple_cases", filename, black.Mode(magic_trailing_comma=magic_trailing_comma)
+    )


 @pytest.mark.parametrize("filename", all_data_cases("preview"))
 def test_preview_format(filename: str) -> None:
-    magic_trailing_comma = filename != "skip_magic_trailing_comma"
-    check_file(
-        "preview",
-        filename,
-        black.Mode(preview=True, magic_trailing_comma=magic_trailing_comma),
-    )
-
-
-@pytest.mark.parametrize("filename", all_data_cases("preview_39"))
-def test_preview_minimum_python_39_format(filename: str) -> None:
-    source, expected = read_data("preview_39", filename)
-    mode = black.Mode(preview=True)
-    assert_format(source, expected, mode, minimum_version=(3, 9))
-
-
-@pytest.mark.parametrize("filename", all_data_cases("preview_310"))
-def test_preview_minimum_python_310_format(filename: str) -> None:
-    source, expected = read_data("preview_310", filename)
-    mode = black.Mode(preview=True)
-    assert_format(source, expected, mode, minimum_version=(3, 10))
+    check_file("preview", filename, black.Mode(preview=True))


 def test_preview_context_managers_targeting_py38() -> None: