Prepare the 2025 stable style (#4558)
parent e58baf15b9
commit c0b92f3888
@@ -6,13 +6,29 @@

<!-- Include any especially major or disruptive changes here -->

This release introduces the new 2025 stable style (#4558), stabilizing
the following changes:

- Normalize casing of Unicode escape characters in strings to lowercase (#2916)
- Fix inconsistencies in whether certain strings are detected as docstrings (#4095)
- Consistently add trailing commas to typed function parameters (#4164)
- Remove redundant parentheses in if guards for case blocks (#4214)
- Add parentheses to if clauses in case blocks when the line is too long (#4269)
- Whitespace before `# fmt: skip` comments is no longer normalized (#4146)
- Fix line length computation for certain expressions that involve the power operator (#4154)
- Check if there is a newline before the terminating quotes of a docstring (#4185)
- Fix type annotation spacing between `*` and more complex type variable tuple (#4440)

The following changes were not in any previous release:

- Remove parentheses around sole list items (#4312)

### Stable style

<!-- Changes that affect Black's stable style -->

- Fix formatting cells in IPython notebooks with magic methods and starting or trailing
  empty lines (#4484)
- Fix crash when formatting `with` statements containing tuple generators/unpacking
  (#4538)
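To make a couple of the stabilized changes above more concrete, here is a small before/after sketch (illustrative only, not part of the commit; identifier names are invented and the exact output depends on line length and other settings):

```python
# Illustrative input:
items = [(True)]
match command:
    case "start" if (flag_is_set):
        ...

# Roughly what the 2025 stable style produces:
items = [True]                    # parentheses around a sole list item are removed
match command:
    case "start" if flag_is_set:  # redundant guard parentheses are removed
        ...
```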

@@ -22,7 +38,6 @@

- Fix/remove string merging changing f-string quotes on f-strings with internal quotes
  (#4498)
- Remove parentheses around sole list items (#4312)
- Collapse multiple empty lines after an import into one (#4489)
- Prevent `string_processing` and `wrap_long_dict_values_in_parens` from removing
  parentheses around long dictionary values (#4377)

@@ -250,6 +250,11 @@ exception of [capital "R" prefixes](#rstrings-and-rstrings), unicode literal mar

(`u`) are removed because they are meaningless in Python 3, and in the case of multiple
characters "r" is put first as in spoken language: "raw f-string".

Another area where Python allows multiple ways to format a string is escape sequences.
For example, `"\uabcd"` and `"\uABCD"` evaluate to the same string. _Black_ normalizes
such escape sequences to lowercase, but uses uppercase for `\N` named character escapes,
such as `"\N{MEETEI MAYEK LETTER HUK}"`.
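A short sketch of what this normalization means in practice, using the example values from the paragraph above:

```python
# Written in the source:
s = "\uABCD and \x1B"
# After formatting, the hex digits in the escapes are lowercased:
s = "\uabcd and \x1b"
# \N named character escapes keep their uppercase form:
n = "\N{MEETEI MAYEK LETTER HUK}"
```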

The main reason to standardize on a single form of quotes is aesthetics. Having one kind
of quotes everywhere reduces reader distraction. It will also enable a future version of
_Black_ to merge consecutive string literals that ended up on the same line (see

@@ -20,27 +20,6 @@ demoted from the `--preview` to the `--unstable` style, users can use the

Currently, the following features are included in the preview style:

- `hex_codes_in_unicode_sequences`: normalize casing of Unicode escape characters in
  strings
- `unify_docstring_detection`: fix inconsistencies in whether certain strings are
  detected as docstrings
- `no_normalize_fmt_skip_whitespace`: whitespace before `# fmt: skip` comments is no
  longer normalized
- `typed_params_trailing_comma`: consistently add trailing commas to typed function
  parameters
- `is_simple_lookup_for_doublestar_expression`: fix line length computation for certain
  expressions that involve the power operator
- `docstring_check_for_newline`: checks if there is a newline before the terminating
  quotes of a docstring
- `remove_redundant_guard_parens`: Removes redundant parentheses in `if` guards for
  `case` blocks.
- `parens_for_long_if_clauses_in_case_block`: Adds parentheses to `if` clauses in `case`
  blocks when the line is too long
- `pep646_typed_star_arg_type_var_tuple`: fix type annotation spacing between * and more
  complex type variable tuple (i.e. `def fn(*args: *tuple[*Ts, T]) -> None: pass`)
- `remove_lone_list_item_parens`: remove redundant parentheses around lone list items
  (depends on unstable `hug_parens_with_braces_and_square_brackets` feature in some
  cases)
- `always_one_newline_after_import`: Always force one blank line after import
  statements, except when the line after the import is a comment or an import statement
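For illustration, a hypothetical sketch of the `parens_for_long_if_clauses_in_case_block` behavior listed above (identifier names are invented; whether a split actually happens depends on the configured line length):

```python
# A case guard that does not fit on one line:
match point:
    case Point(x=x, y=y) if some_long_condition_holds and another_long_condition_holds_too:
        handle(x, y)

# With the feature enabled, the if clause is wrapped in parentheses so it can be split:
match point:
    case Point(x=x, y=y) if (
        some_long_condition_holds and another_long_condition_holds_too
    ):
        handle(x, y)
```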

@@ -4,7 +4,7 @@

from functools import lru_cache
from typing import Final, Optional, Union

from black.mode import Mode, Preview
from black.mode import Mode
from black.nodes import (
    CLOSING_BRACKETS,
    STANDALONE_COMMENT,

@@ -235,11 +235,7 @@ def convert_one_fmt_off_pair(

    standalone_comment_prefix += fmt_off_prefix
    hidden_value = comment.value + "\n" + hidden_value
    if is_fmt_skip:
        hidden_value += (
            comment.leading_whitespace
            if Preview.no_normalize_fmt_skip_whitespace in mode
            else " "
        ) + comment.value
        hidden_value += comment.leading_whitespace + comment.value
    if hidden_value.endswith("\n"):
        # That happens when one of the `ignored_nodes` ended with a NEWLINE
        # leaf (possibly followed by a DEDENT).

@@ -43,7 +43,6 @@

    "time",
    "timeit",
))
TOKEN_HEX = secrets.token_hex


@dataclasses.dataclass(frozen=True)

@@ -160,7 +159,7 @@ def mask_cell(src: str) -> tuple[str, list[Replacement]]:

    becomes

        "25716f358c32750e"
        b"25716f358c32750"
        'foo'

    The replacements are returned, along with the transformed code.

@@ -192,6 +191,18 @@ def mask_cell(src: str) -> tuple[str, list[Replacement]]:

    return transformed, replacements


def create_token(n_chars: int) -> str:
    """Create a randomly generated token that is n_chars characters long."""
    assert n_chars > 0
    n_bytes = max(n_chars // 2 - 1, 1)
    token = secrets.token_hex(n_bytes)
    if len(token) + 3 > n_chars:
        token = token[:-1]
    # We use a bytestring so that the string does not get interpreted
    # as a docstring.
    return f'b"{token}"'
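For context, a rough usage sketch of the helper added here (the token is random, so the value shown is only illustrative):

```python
# Masking an 8-character magic such as "%%timeit": n_bytes = 3, so token_hex
# yields 6 hex digits, which get trimmed by one so the b"..." literal is 8
# characters long.
token = create_token(8)
# e.g. token == 'b"f3a9c"'
assert token.startswith('b"') and token.endswith('"')
```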


def get_token(src: str, magic: str) -> str:
    """Return randomly generated token to mask IPython magic with.

@@ -201,11 +212,11 @@ def get_token(src: str, magic: str) -> str:

    not already present anywhere else in the cell.
    """
    assert magic
    nbytes = max(len(magic) // 2 - 1, 1)
    token = TOKEN_HEX(nbytes)
    n_chars = len(magic)
    token = create_token(n_chars)
    counter = 0
    while token in src:
        token = TOKEN_HEX(nbytes)
        token = create_token(n_chars)
        counter += 1
        if counter > 100:
            raise AssertionError(

@@ -213,9 +224,7 @@ def get_token(src: str, magic: str) -> str:

                "Please report a bug on https://github.com/psf/black/issues. "
                f"The magic might be helpful: {magic}"
            ) from None
    if len(token) + 2 < len(magic):
        token = f"{token}."
    return f'"{token}"'
    return token


def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
@@ -414,10 +414,9 @@ def foo(a: (int), b: (float) = 7): ...

        yield from self.visit_default(node)

    def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
        if Preview.hex_codes_in_unicode_sequences in self.mode:
            normalize_unicode_escape_sequences(leaf)
        normalize_unicode_escape_sequences(leaf)

        if is_docstring(leaf, self.mode) and not re.search(r"\\\s*\n", leaf.value):
        if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value):
            # We're ignoring docstrings with backslash newline escapes because changing
            # indentation of those changes the AST representation of the code.
            if self.mode.string_normalization:

@@ -488,10 +487,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:

                and len(indent) + quote_len <= self.mode.line_length
                and not has_trailing_backslash
            ):
                if (
                    Preview.docstring_check_for_newline in self.mode
                    and leaf.value[-1 - quote_len] == "\n"
                ):
                if leaf.value[-1 - quote_len] == "\n":
                    leaf.value = prefix + quote + docstring + quote
                else:
                    leaf.value = prefix + quote + docstring + "\n" + indent + quote

@@ -511,10 +507,7 @@ def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]:

    def visit_atom(self, node: Node) -> Iterator[Line]:
        """Visit any atom"""
        if (
            Preview.remove_lone_list_item_parens in self.mode
            and len(node.children) == 3
        ):
        if len(node.children) == 3:
            first = node.children[0]
            last = node.children[-1]
            if (first.type == token.LSQB and last.type == token.RSQB) or (

@@ -602,8 +595,7 @@ def __post_init__(self) -> None:

        # PEP 634
        self.visit_match_stmt = self.visit_match_case
        self.visit_case_block = self.visit_match_case
        if Preview.remove_redundant_guard_parens in self.mode:
            self.visit_guard = partial(v, keywords=Ø, parens={"if"})
        self.visit_guard = partial(v, keywords=Ø, parens={"if"})


def _hugging_power_ops_line_to_string(

@@ -1132,12 +1124,7 @@ def _ensure_trailing_comma(

        return False
    # Don't add commas if we already have any commas
    if any(
        leaf.type == token.COMMA
        and (
            Preview.typed_params_trailing_comma not in original.mode
            or not is_part_of_annotation(leaf)
        )
        for leaf in leaves
        leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves
    ):
        return False
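The `_ensure_trailing_comma` change above is what drives the stabilized `typed_params_trailing_comma` behavior: commas that are part of a parameter's annotation no longer count as "already having commas". A rough before/after sketch (names are invented and whether the signature splits depends on line length):

```python
# Previously, the comma inside the annotation suppressed the trailing comma:
def process(
    callback: Callable[[int, str], None] = default_callback
) -> None: ...

# 2025 stable style: the typed parameter still gets a trailing comma when split:
def process(
    callback: Callable[[int, str], None] = default_callback,
) -> None: ...
```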

@@ -1418,11 +1405,7 @@ def normalize_invisible_parens( # noqa: C901

        )

    # Add parentheses around if guards in case blocks
    if (
        isinstance(child, Node)
        and child.type == syms.guard
        and Preview.parens_for_long_if_clauses_in_case_block in mode
    ):
    if isinstance(child, Node) and child.type == syms.guard:
        normalize_invisible_parens(
            child, parens_after={"if"}, mode=mode, features=features
        )

@@ -204,9 +204,7 @@ def _is_triple_quoted_string(self) -> bool:

    @property
    def is_docstring(self) -> bool:
        """Is the line a docstring?"""
        if Preview.unify_docstring_detection not in self.mode:
            return self._is_triple_quoted_string
        return bool(self) and is_docstring(self.leaves[0], self.mode)
        return bool(self) and is_docstring(self.leaves[0])

    @property
    def is_chained_assignment(self) -> bool:

@@ -196,24 +196,12 @@ def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> b

class Preview(Enum):
    """Individual preview style features."""

    hex_codes_in_unicode_sequences = auto()
    # NOTE: string_processing requires wrap_long_dict_values_in_parens
    # for https://github.com/psf/black/issues/3117 to be fixed.
    string_processing = auto()
    hug_parens_with_braces_and_square_brackets = auto()
    unify_docstring_detection = auto()
    no_normalize_fmt_skip_whitespace = auto()
    wrap_long_dict_values_in_parens = auto()
    multiline_string_handling = auto()
    typed_params_trailing_comma = auto()
    is_simple_lookup_for_doublestar_expression = auto()
    docstring_check_for_newline = auto()
    remove_redundant_guard_parens = auto()
    parens_for_long_if_clauses_in_case_block = auto()
    # NOTE: remove_lone_list_item_parens requires
    # hug_parens_with_braces_and_square_brackets to remove parens in some cases
    remove_lone_list_item_parens = auto()
    pep646_typed_star_arg_type_var_tuple = auto()
    always_one_newline_after_import = auto()
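The members deleted from `Preview` in this hunk are the stabilized features; the remaining ones stay gated behind membership checks like the ones removed elsewhere in this commit. A minimal sketch of that gating pattern (not part of this diff):

```python
from black.mode import Mode, Preview

mode = Mode(preview=True)
# Mode supports `in` checks for preview features, so gated code reads:
if Preview.multiline_string_handling in mode:
    ...  # preview-only behavior
```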

@@ -14,7 +14,7 @@

from mypy_extensions import mypyc_attr

from black.cache import CACHE_DIR
from black.mode import Mode, Preview
from black.mode import Mode
from black.strings import get_string_prefix, has_triple_quotes
from blib2to3 import pygram
from blib2to3.pgen2 import token

@@ -244,13 +244,7 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str: # no

    elif (
        prevp.type == token.STAR
        and parent_type(prevp) == syms.star_expr
        and (
            parent_type(prevp.parent) == syms.subscriptlist
            or (
                Preview.pep646_typed_star_arg_type_var_tuple in mode
                and parent_type(prevp.parent) == syms.tname_star
            )
        )
        and parent_type(prevp.parent) in (syms.subscriptlist, syms.tname_star)
    ):
        # No space between typevar tuples or unpacking them.
        return NO

@@ -551,7 +545,7 @@ def is_arith_like(node: LN) -> bool:

}


def is_docstring(node: NL, mode: Mode) -> bool:
def is_docstring(node: NL) -> bool:
    if isinstance(node, Leaf):
        if node.type != token.STRING:
            return False

@@ -561,8 +555,7 @@ def is_docstring(node: NL, mode: Mode) -> bool:

        return False

    if (
        Preview.unify_docstring_detection in mode
        and node.parent
        node.parent
        and node.parent.type == syms.simple_stmt
        and not node.parent.prev_sibling
        and node.parent.parent
@@ -79,20 +79,10 @@

"type": "array",
"items": {
    "enum": [
        "hex_codes_in_unicode_sequences",
        "string_processing",
        "hug_parens_with_braces_and_square_brackets",
        "unify_docstring_detection",
        "no_normalize_fmt_skip_whitespace",
        "wrap_long_dict_values_in_parens",
        "multiline_string_handling",
        "typed_params_trailing_comma",
        "is_simple_lookup_for_doublestar_expression",
        "docstring_check_for_newline",
        "remove_redundant_guard_parens",
        "parens_for_long_if_clauses_in_case_block",
        "remove_lone_list_item_parens",
        "pep646_typed_star_arg_type_var_tuple",
        "always_one_newline_after_import"
    ]
},
@@ -13,7 +13,7 @@

from black.comments import contains_pragma_comment
from black.lines import Line, append_leaves
from black.mode import Feature, Mode, Preview
from black.mode import Feature, Mode
from black.nodes import (
    CLOSING_BRACKETS,
    OPENING_BRACKETS,

@@ -82,18 +82,12 @@ def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:

        # Brackets and parentheses indicate calls, subscripts, etc. ...
        # basically stuff that doesn't count as "simple". Only a NAME lookup
        # or dotted lookup (eg. NAME.NAME) is OK.
        if Preview.is_simple_lookup_for_doublestar_expression not in mode:
            return original_is_simple_lookup_func(line, index, kind)

        if kind == -1:
            return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
        else:
        if kind == -1:
            return handle_is_simple_look_up_prev(
                line, index, {token.RPAR, token.RSQB}
            )
        else:
            return handle_is_simple_lookup_forward(
                line, index, {token.LPAR, token.LSQB}
            )
            return handle_is_simple_lookup_forward(
                line, index, {token.LPAR, token.LSQB}
            )

    def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
        # An operand is considered "simple" if it's a NAME, a numeric CONSTANT, a simple

@@ -139,30 +133,6 @@ def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:

        yield new_line


def original_is_simple_lookup_func(
    line: Line, index: int, step: Literal[1, -1]
) -> bool:
    if step == -1:
        disallowed = {token.RPAR, token.RSQB}
    else:
        disallowed = {token.LPAR, token.LSQB}

    while 0 <= index < len(line.leaves):
        current = line.leaves[index]
        if current.type in disallowed:
            return False
        if current.type not in {token.NAME, token.DOT} or current.value == "for":
            # If the current token isn't disallowed, we'll assume this is
            # simple as only the disallowed tokens are semantically
            # attached to this lookup expression we're checking. Also,
            # stop early if we hit the 'for' bit of a comprehension.
            return True

        index += step

    return True


def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
    """
    Handling the determination of is_simple_lookup for the lines prior to the doublestar
@@ -1,4 +1,3 @@

# flags: --preview
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment

@@ -1,4 +1,3 @@

# flags: --preview
"""
87 characters ............................................................................
"""

@@ -1,4 +1,3 @@

# flags: --preview
print () # fmt: skip
print () # fmt:skip


@@ -1,4 +1,3 @@

# flags: --preview
x = "\x1F"
x = "\\x1B"
x = "\\\x1B"

@@ -1,4 +1,4 @@

# flags: --preview --minimum-version=3.10
# flags: --minimum-version=3.10
# normal, short, function definition
def foo(a, b) -> tuple[int, float]: ...

@@ -1,4 +1,3 @@

# flags: --preview
m2 = None if not isinstance(dist, Normal) else m** 2 + s * 2
m3 = None if not isinstance(dist, Normal) else m ** 2 + s * 2
m4 = None if not isinstance(dist, Normal) else m**2 + s * 2

@@ -1,4 +1,3 @@

# flags: --preview
def func(
    arg1,
    arg2,

@@ -1,7 +1,6 @@

# flags: --preview
"""I am a very helpful module docstring.

With trailing spaces (only removed with unify_docstring_detection on):
With trailing spaces:
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam,

@@ -39,7 +38,7 @@

# output
"""I am a very helpful module docstring.

With trailing spaces (only removed with unify_docstring_detection on):
With trailing spaces:
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam,

@@ -62,5 +62,4 @@ class MultilineDocstringsAsWell:


class SingleQuotedDocstring:

    "I'm a docstring but I don't even get triple quotes."

@@ -1,4 +1,4 @@

# flags: --preview --minimum-version=3.10
# flags: --minimum-version=3.10
match match:
    case "test" if case != "not very loooooooooooooog condition": # comment
        pass

@@ -1,4 +1,4 @@

# flags: --minimum-version=3.11 --preview
# flags: --minimum-version=3.11


def fn(*args: *tuple[*A, B]) -> None:

@@ -1,4 +1,3 @@

# flags: --preview
items = [(123)]
items = [(True)]
items = [(((((True)))))]

@@ -1,4 +1,4 @@

# flags: --minimum-version=3.10 --preview --line-length=79
# flags: --minimum-version=3.10 --line-length=79

match 1:
    case _ if (True):

@@ -1,4 +1,3 @@

# flags: --preview
def long_function_name_goes_here(
    x: Callable[List[int]]
) -> Union[List[int], float, str, bytes, Tuple[int]]:

@@ -1,9 +1,9 @@

# flags: --preview
# This is testing an issue that is specific to the preview style
# flags: --unstable
# This is testing an issue that is specific to the unstable style (wrap_long_dict_values_in_parens)
{
    "is_update": (up := commit.hash in update_hashes)
}

# output
# This is testing an issue that is specific to the preview style
# This is testing an issue that is specific to the unstable style (wrap_long_dict_values_in_parens)
{"is_update": (up := commit.hash in update_hashes)}
@@ -2353,8 +2353,8 @@ def test_cache_key(self) -> None:

            # If you are looking to remove one of these features, just
            # replace it with any other feature.
            values = [
                {Preview.docstring_check_for_newline},
                {Preview.hex_codes_in_unicode_sequences},
                {Preview.multiline_string_handling},
                {Preview.string_processing},
            ]
        elif field.type is bool:
            values = [True, False]

@@ -541,8 +541,8 @@ def test_ipynb_and_pyi_flags() -> None:


def test_unable_to_replace_magics(monkeypatch: MonkeyPatch) -> None:
    src = "%%time\na = 'foo'"
    monkeypatch.setattr("black.handle_ipynb_magics.TOKEN_HEX", lambda _: "foo")
    src = '%%time\na = b"foo"'
    monkeypatch.setattr("secrets.token_hex", lambda _: "foo")
    with pytest.raises(
        AssertionError, match="Black was not able to replace IPython magic"
    ):