Enforce no formatting changes for PRs via CI (GH-2951)

Now PRs will run two diff-shades jobs: "preview-changes", which formats
all projects with preview=True, and "assert-no-changes", which formats
all projects with preview=False. The latter also fails if any changes
were made.

Pushes to main will only run "preview-changes".

Also, the workflow_dispatch trigger was dropped since it was
complicating everything for little gain.
Richard Si, 2022-03-26 17:22:38 -04:00 (committed by GitHub)
parent bd1e980349
commit f239d227c0
4 changed files with 121 additions and 151 deletions
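
For reference, the new configure job emits a JSON job matrix roughly like the sketch below. The field names mirror the changes to scripts/diff_shades_gha_helper.py further down; the revision names are placeholders, and the real script hands the JSON to the workflow via a ::set-output command rather than printing it.

```python
# Minimal sketch (not the helper's exact code) of the matrix consumed by
# strategy.matrix.include in the analysis job. Revision names are made up.
import json

jobs = [
    {"mode": "preview-changes", "force-flag": "--force-preview-style"},
    {"mode": "assert-no-changes", "force-flag": "--force-stable-style"},  # PRs only
]
for entry in jobs:
    entry["baseline-analysis"] = f"{entry['mode']}-main-abc1234.json"   # placeholder revision
    entry["target-analysis"] = f"{entry['mode']}-pr-2951-def5678.json"  # placeholder revision

print(json.dumps(jobs, indent=None))
```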

View File

@@ -3,54 +3,61 @@ name: diff-shades
 on:
   push:
     branches: [main]
-    paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"]
+    paths: ["src/**", "setup.*", "pyproject.toml", ".github/workflows/*"]
   pull_request:
-    paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"]
-  workflow_dispatch:
-    inputs:
-      baseline:
-        description: >
-          The baseline revision. Pro-tip, use `.pypi` to use the latest version
-          on PyPI or `.XXX` to use a PR.
-        required: true
-        default: main
-      baseline-args:
-        description: "Custom Black arguments (eg. -l 79)"
-        required: false
-      target:
-        description: >
-          The target revision to compare against the baseline. Same tip applies here.
-        required: true
-      target-args:
-        description: "Custom Black arguments (eg. -S)"
-        required: false
+    paths: ["src/**", "setup.*", "pyproject.toml", ".github/workflows/*"]

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}
   cancel-in-progress: true

 jobs:
+  configure:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-config.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v3
+      - name: Install diff-shades and support dependencies
+        run: |
+          python -m pip install click packaging urllib3
+          python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
+      - name: Calculate run configuration & metadata
+        id: set-config
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+        run: >
+          python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }}
+
   analysis:
-    name: analysis / linux
+    name: analysis / ${{ matrix.mode }}
+    needs: configure
     runs-on: ubuntu-latest
     env:
       # Clang is less picky with the C code it's given than gcc (and may
       # generate faster binaries too).
       CC: clang-12
+    strategy:
+      fail-fast: false
+      matrix:
+        include: ${{ fromJson(needs.configure.outputs.matrix )}}
     steps:
       - name: Checkout this repository (full clone)
         uses: actions/checkout@v3
         with:
+          # The baseline revision could be rather old so a full clone is ideal.
           fetch-depth: 0
       - uses: actions/setup-python@v3
       - name: Install diff-shades and support dependencies
         run: |
+          python -m pip install pip --upgrade
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
           python -m pip install click packaging urllib3
           python -m pip install -r .github/mypyc-requirements.txt
@@ -59,28 +66,19 @@ jobs:
           git config user.name "diff-shades-gha"
           git config user.email "diff-shades-gha@example.com"
-      - name: Calculate run configuration & metadata
-        id: config
-        env:
-          GITHUB_TOKEN: ${{ github.token }}
-        run: >
-          python helper.py config ${{ github.event_name }}
-          ${{ github.event.inputs.baseline }} ${{ github.event.inputs.target }}
-          --baseline-args "${{ github.event.inputs.baseline-args }}"
       - name: Attempt to use cached baseline analysis
         id: baseline-cache
         uses: actions/cache@v2.1.7
         with:
-          path: ${{ steps.config.outputs.baseline-analysis }}
-          key: ${{ steps.config.outputs.baseline-cache-key }}
+          path: ${{ matrix.baseline-analysis }}
+          key: ${{ matrix.baseline-cache-key }}
       - name: Build and install baseline revision
         if: steps.baseline-cache.outputs.cache-hit != 'true'
         env:
           GITHUB_TOKEN: ${{ github.token }}
         run: >
-          ${{ steps.config.outputs.baseline-setup-cmd }}
+          ${{ matrix.baseline-setup-cmd }}
           && python setup.py --use-mypyc bdist_wheel
           && python -m pip install dist/*.whl && rm build dist -r
@@ -88,63 +86,69 @@ jobs:
         if: steps.baseline-cache.outputs.cache-hit != 'true'
         run: >
           diff-shades analyze -v --work-dir projects-cache/
-          ${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.baseline-args }}
+          ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }}
       - name: Build and install target revision
         env:
           GITHUB_TOKEN: ${{ github.token }}
         run: >
-          ${{ steps.config.outputs.target-setup-cmd }}
+          ${{ matrix.target-setup-cmd }}
           && python setup.py --use-mypyc bdist_wheel
           && python -m pip install dist/*.whl
       - name: Analyze target revision
         run: >
           diff-shades analyze -v --work-dir projects-cache/
-          ${{ steps.config.outputs.target-analysis }} --repeat-projects-from
-          ${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.target-args }}
+          ${{ matrix.target-analysis }} --repeat-projects-from
+          ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }}
       - name: Generate HTML diff report
         run: >
-          diff-shades --dump-html diff.html compare --diff --quiet
-          ${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }}
+          diff-shades --dump-html diff.html compare --diff
+          ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}
       - name: Upload diff report
         uses: actions/upload-artifact@v2
         with:
-          name: diff.html
+          name: ${{ matrix.mode }}-diff.html
           path: diff.html
       - name: Upload baseline analysis
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ steps.config.outputs.baseline-analysis }}
-          path: ${{ steps.config.outputs.baseline-analysis }}
+          name: ${{ matrix.baseline-analysis }}
+          path: ${{ matrix.baseline-analysis }}
       - name: Upload target analysis
         uses: actions/upload-artifact@v2
         with:
-          name: ${{ steps.config.outputs.target-analysis }}
-          path: ${{ steps.config.outputs.target-analysis }}
+          name: ${{ matrix.target-analysis }}
+          path: ${{ matrix.target-analysis }}
       - name: Generate summary file (PR only)
-        if: github.event_name == 'pull_request'
+        if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
         run: >
           python helper.py comment-body
-          ${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }}
-          ${{ steps.config.outputs.baseline-sha }} ${{ steps.config.outputs.target-sha }}
+          ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}
+          ${{ matrix.baseline-sha }} ${{ matrix.target-sha }}
           ${{ github.event.pull_request.number }}
       - name: Upload summary file (PR only)
-        if: github.event_name == 'pull_request'
+        if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes'
         uses: actions/upload-artifact@v2
         with:
           name: .pr-comment.json
           path: .pr-comment.json
-      # This is last so the diff-shades-comment workflow can still work even if we
-      # end up detecting failed files and failing the run.
-      - name: Check for failed files in both analyses
-        run: >
-          diff-shades show-failed --check --show-log ${{ steps.config.outputs.baseline-analysis }};
-          diff-shades show-failed --check --show-log ${{ steps.config.outputs.target-analysis }}
+      - name: Verify zero changes (PR only)
+        if: matrix.mode == 'assert-no-changes'
+        run: >
+          diff-shades compare --check ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }}
+          || (echo "Please verify you didn't change the stable code style unintentionally!" && exit 1)
+      - name: Check for failed files for target revision
+        # Even if the previous step failed, we should still check for failed files.
+        if: always()
+        run: >
+          diff-shades show-failed --check --show-log ${{ matrix.target-analysis }}
+          --check-allow 'sqlalchemy:test/orm/test_relationship_criteria.py'

View File

@@ -9,10 +9,10 @@ enough to cause frustration to projects that are already "black formatted".
 ## diff-shades

-diff-shades is a tool that runs _Black_ across a list of Git cloneable OSS projects
-recording the results. The main highlight feature of diff-shades is being able to
-compare two revisions of _Black_. This is incredibly useful as it allows us to see what
-exact changes will occur, say merging a certain PR.
+diff-shades is a tool that runs _Black_ across a list of open-source projects recording
+the results. The main highlight feature of diff-shades is being able to compare two
+revisions of _Black_. This is incredibly useful as it allows us to see what exact
+changes will occur, say merging a certain PR.

 For more information, please see the [diff-shades documentation][diff-shades].
@@ -20,35 +20,39 @@ For more information, please see the [diff-shades documentation][diff-shades].
 diff-shades is also the tool behind the "diff-shades results comparing ..." /
 "diff-shades reports zero changes ..." comments on PRs. The project has a GitHub Actions
-workflow which runs diff-shades twice against two revisions of _Black_ according to
-these rules:
+workflow that analyzes and compares two revisions of _Black_ according to these rules:

 |                       | Baseline revision       | Target revision              |
 | --------------------- | ----------------------- | ---------------------------- |
 | On PRs                | latest commit on `main` | PR commit with `main` merged |
 | On pushes (main only) | latest PyPI version     | the pushed commit            |

-Once finished, a PR comment will be posted embedding a summary of the changes and links
-to further information. If there's a pre-existing diff-shades comment, it'll be updated
-instead the next time the workflow is triggered on the same PR.
-
-The workflow uploads 3-4 artifacts upon completion: the two generated analyses (they
-have the .json file extension), `diff.html`, and `.pr-comment.json` if triggered by a
-PR. The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be
-downloaded locally. `diff.html` comes in handy for push-based or manually triggered
-runs. And the analyses exist just in case you want to do further analysis using the
-collected data locally.
-
-Note that the workflow will only fail intentionally if while analyzing a file failed to
+For pushes to main, there's only one analysis job named `preview-changes` where the
+preview style is used for all projects.
+
+For PRs they get one more analysis job: `assert-no-changes`. It's similar to
+`preview-changes` but runs with the stable code style. It will fail if changes were
+made. This makes sure code won't be reformatted again and again within the same year in
+accordance to Black's stability policy.
+
+Additionally for PRs, a PR comment will be posted embedding a summary of the preview
+changes and links to further information. If there's a pre-existing diff-shades comment,
+it'll be updated instead the next time the workflow is triggered on the same PR.
+
+```{note}
+The `preview-changes` job will only fail intentionally if while analyzing a file failed to
 format. Otherwise a failure indicates a bug in the workflow.
-
-```{tip}
-Maintainers with write access or higher can trigger the workflow manually from the
-Actions tab using the `workflow_dispatch` event. Simply select "diff-shades"
-from the workflows list on the left, press "Run workflow", and fill in which revisions
-and command line arguments to use.
-
-Once finished, check the logs or download the artifacts for local use.
 ```
+
+The workflow uploads several artifacts upon completion:
+
+- The raw analyses (.json)
+- HTML diffs (.html)
+- `.pr-comment.json` (if triggered by a PR)
+
+The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be
+downloaded locally. The HTML diffs come in handy for push-based where there's no PR to
+post a comment. And the analyses exist just in case you want to do further analysis
+using the collected data locally.

 [diff-shades]: https://github.com/ichard26/diff-shades#readme
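
To make the stability guarantee above concrete, here is a minimal sketch of what the `assert-no-changes` job boils down to. This is not diff-shades' implementation; the mappings are hypothetical stand-ins for its analysis records, though the failure message matches the workflow step.

```python
# Minimal sketch, assuming both analyses are reduced to a mapping of
# "project:file" -> formatted source text (a hypothetical structure).
import sys
from typing import Dict


def assert_no_changes(baseline: Dict[str, str], target: Dict[str, str]) -> None:
    # Fail if formatting with the stable style differs between the two revisions.
    changed = [path for path, source in target.items() if baseline.get(path) != source]
    if changed:
        print("Please verify you didn't change the stable code style unintentionally!")
        sys.exit(1)


assert_no_changes({"proj:a.py": "x = 1\n"}, {"proj:a.py": "x = 1\n"})  # passes
```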

View File

@@ -23,7 +23,7 @@
 import zipfile
 from io import BytesIO
 from pathlib import Path
-from typing import Any, Optional, Tuple
+from typing import Any

 import click
 import urllib3
@@ -55,7 +55,7 @@ def set_output(name: str, value: str) -> None:
     print(f"::set-output name={name}::{value}")


-def http_get(url: str, is_json: bool = True, **kwargs: Any) -> Any:
+def http_get(url: str, *, is_json: bool = True, **kwargs: Any) -> Any:
     headers = kwargs.get("headers") or {}
     headers["User-Agent"] = USER_AGENT
     if "github" in url:
@@ -78,10 +78,10 @@ def http_get(url: str, is_json: bool = True, **kwargs: Any) -> Any:
     return data


-def get_branch_or_tag_revision(sha: str = "main") -> str:
+def get_main_revision() -> str:
     data = http_get(
         f"https://api.github.com/repos/{REPO}/commits",
-        fields={"per_page": "1", "sha": sha},
+        fields={"per_page": "1", "sha": "main"},
     )
     assert isinstance(data[0]["sha"], str)
     return data[0]["sha"]
@@ -100,53 +100,18 @@ def get_pypi_version() -> Version:
     return sorted_versions[0]


-def resolve_custom_ref(ref: str) -> Tuple[str, str]:
-    if ref == ".pypi":
-        # Special value to get latest PyPI version.
-        version = str(get_pypi_version())
-        return version, f"git checkout {version}"
-
-    if ref.startswith(".") and ref[1:].isnumeric():
-        # Special format to get a PR.
-        number = int(ref[1:])
-        revision = get_pr_revision(number)
-        return (
-            f"pr-{number}-{revision[:SHA_LENGTH]}",
-            f"gh pr checkout {number} && git merge origin/main",
-        )
-
-    # Alright, it's probably a branch, tag, or a commit SHA, let's find out!
-    revision = get_branch_or_tag_revision(ref)
-    # We're cutting the revision short as we might be operating on a short commit SHA.
-    if revision == ref or revision[: len(ref)] == ref:
-        # It's *probably* a commit as the resolved SHA isn't different from the REF.
-        return revision[:SHA_LENGTH], f"git checkout {revision}"
-    # It's *probably* a pre-existing branch or tag, yay!
-    return f"{ref}-{revision[:SHA_LENGTH]}", f"git checkout {revision}"
-
-
 @click.group()
 def main() -> None:
     pass


 @main.command("config", help="Acquire run configuration and metadata.")
-@click.argument(
-    "event", type=click.Choice(["push", "pull_request", "workflow_dispatch"])
-)
-@click.argument("custom_baseline", required=False)
-@click.argument("custom_target", required=False)
-@click.option("--baseline-args", default="")
-def config(
-    event: Literal["push", "pull_request", "workflow_dispatch"],
-    custom_baseline: Optional[str],
-    custom_target: Optional[str],
-    baseline_args: str,
-) -> None:
+@click.argument("event", type=click.Choice(["push", "pull_request"]))
+def config(event: Literal["push", "pull_request"]) -> None:
     import diff_shades

     if event == "push":
+        jobs = [{"mode": "preview-changes", "force-flag": "--force-preview-style"}]
         # Push on main, let's use PyPI Black as the baseline.
         baseline_name = str(get_pypi_version())
         baseline_cmd = f"git checkout {baseline_name}"
@@ -156,11 +121,14 @@ def config(
         target_cmd = f"git checkout {target_rev}"

     elif event == "pull_request":
+        jobs = [
+            {"mode": "preview-changes", "force-flag": "--force-preview-style"},
+            {"mode": "assert-no-changes", "force-flag": "--force-stable-style"},
+        ]
         # PR, let's use main as the baseline.
-        baseline_rev = get_branch_or_tag_revision()
+        baseline_rev = get_main_revision()
         baseline_name = "main-" + baseline_rev[:SHA_LENGTH]
         baseline_cmd = f"git checkout {baseline_rev}"
         pr_ref = os.getenv("GITHUB_REF")
         assert pr_ref is not None
         pr_num = int(pr_ref[10:-6])
@@ -168,27 +136,20 @@ def config(
         target_name = f"pr-{pr_num}-{pr_rev[:SHA_LENGTH]}"
         target_cmd = f"gh pr checkout {pr_num} && git merge origin/main"
-        # These are only needed for the PR comment.
-        set_output("baseline-sha", baseline_rev)
-        set_output("target-sha", pr_rev)
-    else:
-        assert custom_baseline is not None and custom_target is not None
-        baseline_name, baseline_cmd = resolve_custom_ref(custom_baseline)
-        target_name, target_cmd = resolve_custom_ref(custom_target)
-        if baseline_name == target_name:
-            # Alright we're using the same revisions but we're (hopefully) using
-            # different command line arguments, let's support that too.
-            baseline_name += "-1"
-            target_name += "-2"
-
-    set_output("baseline-analysis", baseline_name + ".json")
-    set_output("baseline-setup-cmd", baseline_cmd)
-    set_output("target-analysis", target_name + ".json")
-    set_output("target-setup-cmd", target_cmd)
-    key = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}"
-    key += f"-{baseline_name}-{baseline_args.encode('utf-8').hex()}"
-    set_output("baseline-cache-key", key)
+
+    env = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}"
+    for entry in jobs:
+        entry["baseline-analysis"] = f"{entry['mode']}-{baseline_name}.json"
+        entry["baseline-setup-cmd"] = baseline_cmd
+        entry["target-analysis"] = f"{entry['mode']}-{target_name}.json"
+        entry["target-setup-cmd"] = target_cmd
+        entry["baseline-cache-key"] = f"{env}-{baseline_name}-{entry['mode']}"
+        if event == "pull_request":
+            # These are only needed for the PR comment.
+            entry["baseline-sha"] = baseline_rev
+            entry["target-sha"] = pr_rev
+
+    set_output("matrix", json.dumps(jobs, indent=None))
+    pprint.pprint(jobs)


 @main.command("comment-body", help="Generate the body for a summary PR comment.")
@@ -238,15 +199,13 @@ def comment_details(run_id: str) -> None:
     set_output("needs-comment", "true")

     jobs = http_get(data["jobs_url"])["jobs"]
-    assert len(jobs) == 1, "multiple jobs not supported nor tested"
-    job = jobs[0]
-    steps = {s["name"]: s["number"] for s in job["steps"]}
-    diff_step = steps[DIFF_STEP_NAME]
-    diff_url = job["html_url"] + f"#step:{diff_step}:1"
+    job = next(j for j in jobs if j["name"] == "analysis / preview-changes")
+    diff_step = next(s for s in job["steps"] if s["name"] == DIFF_STEP_NAME)
+    diff_url = job["html_url"] + f"#step:{diff_step['number']}:1"

-    artifacts_data = http_get(data["artifacts_url"])["artifacts"]
-    artifacts = {a["name"]: a["archive_download_url"] for a in artifacts_data}
-    comment_url = artifacts[COMMENT_FILE]
+    artifacts = http_get(data["artifacts_url"])["artifacts"]
+    comment_artifact = next(a for a in artifacts if a["name"] == COMMENT_FILE)
+    comment_url = comment_artifact["archive_download_url"]
     comment_zip = BytesIO(http_get(comment_url, is_json=False))
     with zipfile.ZipFile(comment_zip) as zfile:
         with zfile.open(COMMENT_FILE) as rf:

View File

@@ -697,6 +697,9 @@ def reformat_code(
         report.failed(path, str(exc))


+# diff-shades depends on being to monkeypatch this function to operate. I know it's
+# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
+@mypyc_attr(patchable=True)
 def reformat_one(
     src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
 ) -> None:
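
For context, mypyc-compiled callers bind functions early, so a plain monkeypatch of black.reformat_one may not take effect in compiled builds unless the function is declared patchable. The snippet below is a hypothetical illustration of the kind of patch diff-shades relies on, not its actual code.

```python
# Hypothetical illustration: wrap black.reformat_one to observe which files get
# formatted. Without @mypyc_attr(patchable=True) on reformat_one, compiled
# internals of black could ignore this reassignment.
import black

_original_reformat_one = black.reformat_one


def traced_reformat_one(src, fast, write_back, mode, report):
    print(f"reformatting {src}")  # record the file, then delegate
    _original_reformat_one(src, fast, write_back, mode, report)


black.reformat_one = traced_reformat_one
```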