black/tests/test_primer.py
Cooper Lees 5bb4da02c2
Add cpython Lib/ repository config into primer config - Disabled (#2429)
* Add CPython repository into primer runs

- CPython's tests are probably the best repo for Black to test on, as the stdlib's unit tests should exercise all syntax
  - Limit to running on recent versions of the Python runtime - e.g. today >= 3.9
    - This allows us to parse more syntax
- Exclude all failing files for now
  - We definitely have bugs to explore there - refer to #2407 for more details
  - Some test files contain syntax errors on purpose, so we will never be able to parse them
- Add logging of the black command arguments in debug mode; very handy for seeing how the CLI arguments are formatted

CPython now succeeds with 16 files ignored:
```
Oh no! 💥 💔 💥
1859 files would be reformatted, 148 files would be left unchanged.
```

Testing
- Ran locally with and without string processing - Very little runtime difference BUT 3 more failed files
```
time /tmp/tb/bin/black --experimental-string-processing --check . 2>&1 | tee /tmp/black_cpython_esp
...
Oh no! 💥 💔 💥
1859 files would be reformatted, 148 files would be left unchanged, 16 files would fail to reformat.

real	4m8.563s
user	16m21.735s
sys	0m6.000s
```
- Add a unit test for the new convenience config file flattening that allows long arguments to be broken up into an array/list of strings (sketch below)
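
For illustration, here is a minimal sketch of that flattening behaviour, mirroring the values used in `test_flatten_cli_args` in this file. `flatten_cli_args` below is a stand-in, not black-primer's actual `_flatten_cli_args` implementation:

```
from typing import List, Union


def flatten_cli_args(cli_args: List[Union[List[str], str]]) -> List[str]:
    """Stand-in sketch: join list-valued entries from primer.json into single strings."""
    return ["".join(arg) if isinstance(arg, list) else arg for arg in cli_args]


# flatten_cli_args(["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"])
# == ["--arg", "really/|long|regex|splitup", "--done"]
```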

Addresses #2407

---

Commit history before merge:

* Add new `timeout_seconds` support into primer.json
- If present, sets the forked process timeout to that value in seconds
- Otherwise, stay with the default of 10 minutes (600 seconds)
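
A minimal sketch of that fallback, assuming the value is read from a project's config dict (where exactly the key lives in primer.json is an assumption here):

```
def get_timeout(project_config: dict) -> int:
    # Sketch only: use the configured timeout if present,
    # otherwise keep the default of 10 minutes (600 seconds).
    return int(project_config.get("timeout_seconds", 600))
```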

* Add new "base_path" concept to black-primer
- Rather than starting at the repo root, start at a configured path within the repository
  - e.g. for cpython, only run black on `Lib`

* Disable by default - it's too much for GitHub Actions, but let's leave the config for others to use (see the sketch after this list)
* Minor tweak to _flatten_cli_args
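
Putting these pieces together, a hypothetical sketch of the relevant keys for the (disabled) cpython entry. `base_path` and `timeout_seconds` are named above, and `git_clone_url`/`expect_formatting_changes` appear in the test fixture below; the `disabled` key name and all concrete values are assumptions:

```
# Hypothetical project entry - values and the "disabled" key are assumptions
CPYTHON_PROJECT_CONFIG = {
    "git_clone_url": "https://github.com/python/cpython.git",
    "base_path": "Lib",  # start black from Lib/ rather than the repo root
    "timeout_seconds": 900,  # allow more than the 600 second default
    "expect_formatting_changes": True,
    "disabled": True,  # too heavy to run on GitHub Actions by default
}
```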

Co-authored-by: Richard Si <63936253+ichard26@users.noreply.github.com>
2021-08-24 17:29:49 -04:00

#!/usr/bin/env python3

import asyncio
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import StringIO
from os import getpid
from pathlib import Path
from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
from typing import Any, Callable, Generator, Iterator, Tuple
from unittest.mock import Mock, patch

from click.testing import CliRunner

from black_primer import cli, lib


EXPECTED_ANALYSIS_OUTPUT = """\
-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- 0 projects disabled by config
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
Failed projects:
## black:
- Returned 69
- stdout:
Black didn't work
"""

FAKE_PROJECT_CONFIG = {
    "cli_arguments": ["--unittest"],
    "expect_formatting_changes": False,
    "git_clone_url": "https://github.com/psf/black.git",
}


@contextmanager
def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator:
    old_stdout, sys.stdout = sys.stdout, StringIO()
    try:
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        sys.stdout = old_stdout


@contextmanager
def event_loop() -> Iterator[None]:
    policy = asyncio.get_event_loop_policy()
    loop = policy.new_event_loop()
    asyncio.set_event_loop(loop)
    if sys.platform == "win32":
        asyncio.set_event_loop(asyncio.ProactorEventLoop())
    try:
        yield
    finally:
        loop.close()


async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None:
    raise CalledProcessError(1, ["unittest", "error"], b"", b"")


async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None:
    raise CalledProcessError(123, ["unittest", "error"], b"", b"")


async def return_false(*args: Any, **kwargs: Any) -> bool:
    return False


async def return_subproccess_output(*args: Any, **kwargs: Any) -> Tuple[bytes, bytes]:
    return (b"stdout", b"stderr")


async def return_zero(*args: Any, **kwargs: Any) -> int:
    return 0


class PrimerLibTests(unittest.TestCase):
    def test_analyze_results(self) -> None:
        fake_results = lib.Results(
            {
                "disabled": 0,
                "failed": 1,
                "skipped_long_checkout": 0,
                "success": 68,
                "wrong_py_ver": 0,
            },
            {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")},
        )
        with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout:
            self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout)

    @event_loop()
    def test_black_run(self) -> None:
        """Pretend to run Black to ensure we cater for all scenarios"""
        loop = asyncio.get_event_loop()
        project_name = "unittest"
        repo_path = Path(gettempdir())
        project_config = deepcopy(FAKE_PROJECT_CONFIG)
        results = lib.Results({"failed": 0, "success": 0}, {})

        # Test a successful Black run
        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
            loop.run_until_complete(
                lib.black_run(project_name, repo_path, project_config, results)
            )
        self.assertEqual(1, results.stats["success"])
        self.assertFalse(results.failed_projects)

        # Test a fail based on expecting formatting changes but not getting any
        project_config["expect_formatting_changes"] = True
        results = lib.Results({"failed": 0, "success": 0}, {})
        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
            loop.run_until_complete(
                lib.black_run(project_name, repo_path, project_config, results)
            )
        self.assertEqual(1, results.stats["failed"])
        self.assertTrue(results.failed_projects)

        # Test a fail based on returning 1 and not expecting formatting changes
        project_config["expect_formatting_changes"] = False
        results = lib.Results({"failed": 0, "success": 0}, {})
        with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
            loop.run_until_complete(
                lib.black_run(project_name, repo_path, project_config, results)
            )
        self.assertEqual(1, results.stats["failed"])
        self.assertTrue(results.failed_projects)

        # Test a formatting error based on returning 123
        with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
            loop.run_until_complete(
                lib.black_run(project_name, repo_path, project_config, results)
            )
        self.assertEqual(2, results.stats["failed"])

    def test_flatten_cli_args(self) -> None:
        fake_long_args = ["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"]
        expected = ["--arg", "really/|long|regex|splitup", "--done"]
        self.assertEqual(expected, lib._flatten_cli_args(fake_long_args))

    @event_loop()
    def test_gen_check_output(self) -> None:
        loop = asyncio.get_event_loop()
        stdout, stderr = loop.run_until_complete(
            lib._gen_check_output([lib.BLACK_BINARY, "--help"])
        )
        self.assertTrue("The uncompromising code formatter" in stdout.decode("utf8"))
        self.assertEqual(None, stderr)

        # TODO: Add a test to see failure works on Windows
        if lib.WINDOWS:
            return

        false_bin = "/usr/bin/false" if system() == "Darwin" else "/bin/false"
        with self.assertRaises(CalledProcessError):
            loop.run_until_complete(lib._gen_check_output([false_bin]))

        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(
                lib._gen_check_output(["/bin/sleep", "2"], timeout=0.1)
            )

    @event_loop()
    def test_git_checkout_or_rebase(self) -> None:
        loop = asyncio.get_event_loop()
        project_config = deepcopy(FAKE_PROJECT_CONFIG)
        work_path = Path(gettempdir())

        expected_repo_path = work_path / "black"
        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
            returned_repo_path = loop.run_until_complete(
                lib.git_checkout_or_rebase(work_path, project_config)
            )
        self.assertEqual(expected_repo_path, returned_repo_path)

    @patch("sys.stdout", new_callable=StringIO)
    @event_loop()
    def test_process_queue(self, mock_stdout: Mock) -> None:
        """Test the process queue on primer itself
        - If you have non black conforming formatting in primer itself this can fail"""
        loop = asyncio.get_event_loop()
        config_path = Path(lib.__file__).parent / "primer.json"
        with patch("black_primer.lib.git_checkout_or_rebase", return_false):
            with TemporaryDirectory() as td:
                return_val = loop.run_until_complete(
                    lib.process_queue(str(config_path), Path(td), 2)
                )
                self.assertEqual(0, return_val)


class PrimerCLITests(unittest.TestCase):
    @event_loop()
    def test_async_main(self) -> None:
        loop = asyncio.get_event_loop()
        work_dir = Path(gettempdir()) / f"primer_ut_{getpid()}"
        args = {
            "config": "/config",
            "debug": False,
            "keep": False,
            "long_checkouts": False,
            "rebase": False,
            "workdir": str(work_dir),
            "workers": 69,
            "no_diff": False,
        }
        with patch("black_primer.cli.lib.process_queue", return_zero):
            return_val = loop.run_until_complete(cli.async_main(**args))  # type: ignore
            self.assertEqual(0, return_val)

    def test_handle_debug(self) -> None:
        self.assertTrue(cli._handle_debug(None, None, True))

    def test_help_output(self) -> None:
        runner = CliRunner()
        result = runner.invoke(cli.main, ["--help"])
        self.assertEqual(result.exit_code, 0)


if __name__ == "__main__":
    unittest.main()