import logging
import os
import subprocess
import sys
import re
from pathlib import Path
from typing import List
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
class RelativePathFilter(logging.Filter):
def filter(self, record):
path = Path(record.pathname).resolve()
record.relativepath = str(path.relative_to(REPO_ROOT))
return True
def init_log():
logging.basicConfig(
format="[%(relativepath)s:%(lineno)d %(levelname)-1s] %(message)s", level=logging.INFO
)
logging.root.handlers[0].addFilter(RelativePathFilter())
logging.root.handlers[0].flush = sys.stderr.flush
class Sh:
def __init__(self, env=None, cwd=None):
self.env = os.environ.copy()
if env is not None:
self.env.update(env)
self.cwd = cwd
def run(self, cmd: str, **kwargs):
logging.info(f"+ {cmd}")
defaults = {
"check": True,
"shell": True,
"env": self.env,
"encoding": "utf-8",
"cwd": self.cwd,
}
defaults.update(kwargs)
return subprocess.run(cmd, **defaults)
def tags_from_title(title: str) -> List[str]:
tags = re.findall(r"\[(.*?)\]", title)
tags = [t.strip() for t in tags]
return tags
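# A minimal usage sketch (hypothetical inputs, not part of the original script):
# tags_from_title pulls the bracketed tags out of a PR title, and Sh is a thin
# wrapper over subprocess.run that logs each command.
#
#   >>> tags_from_title("[skip ci][Bugfix] Fix a lint error")
#   ['skip ci', 'Bugfix']
#
#   # Sh(env={"FOO": "1"}, cwd="/tmp").run("echo $FOO") would log "+ echo $FOO"
#   # and raise on a non-zero exit code because check=True is the default.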
import argparse
import datetime
import json
import logging
import urllib.error
from pathlib import Path
from typing import Dict, Any
from http_utils import get
from cmd_utils import init_log, REPO_ROOT
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
TEST_DATA = None
def docker_api(url: str, use_pagination: bool = False) -> Dict[str, Any]:
"""
Run a paginated fetch from the public Docker Hub API
"""
if TEST_DATA is not None:
if url not in TEST_DATA:
raise urllib.error.HTTPError(url, 404, "Not found", {}, None)
return TEST_DATA[url]
pagination = ""
if use_pagination:
pagination = f"?page_size={PAGE_SIZE}&page=1"
url = DOCKER_API_BASE + url + pagination
r, headers = get(url)
reset = headers.get("x-ratelimit-reset")
if reset is not None:
reset = datetime.datetime.fromtimestamp(int(reset))
reset = reset.isoformat()
logging.info(
f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / {headers.get('x-ratelimit-limit')} (reset at {reset})"
)
return r
def image_exists(spec: str) -> bool:
name, tag = spec.split(":")
try:
r = docker_api(f"repositories/{name}/tags/{tag}")
logging.info(f"Image exists, got response: {json.dumps(r, indent=2)}")
return True
except urllib.error.HTTPError as e:
logging.exception(e)
return False
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(
description="Writes out Docker images names to be used to .docker-image-names/"
)
parser.add_argument(
"--testing-docker-data",
help="(testing only) JSON data to mock response from Docker Hub API",
)
parser.add_argument(
"--base-dir",
default=".docker-image-names",
help="(testing only) Folder to write image names to",
)
args, other = parser.parse_known_args()
name_dir = Path(args.base_dir)
images = {}
for item in other:
name, tag = item.split("=")
images[name] = tag
if args.testing_docker_data is not None:
TEST_DATA = json.loads(args.testing_docker_data)
logging.info(f"Checking if these images exist in |
tlcpack: {images}")
name_dir.mkdir(exist_ok=True)
images_to_use = {}
for filename, spec in images.items():
if image_exists(spec):
logging.info(f"{spec} found in tlcpack")
images_to_use[filename] = spec
else:
logging.info(f"{spec} not found in tlcpack, using tlcpackstaging")
part, tag = spec.split(":")
user, repo = part.split("/")
tlcpackstaging_tag = f"tlcpackstaging/{repo.replace('-', '_')}:{tag}"
images_to_use[filename] = tlcpackstaging_tag
for filename, image in images_to_use.items():
logging.info(f"Writing image {image} to {name_dir / filename}")
with open(name_dir / filename, "w") as f:
f.write(image)
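# Example invocation (hypothetical script filename and image specs; in CI the
# name=spec pairs come from the pipeline):
#
#   python determine_docker_images.py --base-dir .docker-image-names \
#       ci_lint=tlcpack/ci-lint:v0.71 ci_gpu=tlcpack/ci-gpu:v0.81
#
# Each pair is checked against Docker Hub; specs found under tlcpack are written
# as-is to .docker-image-names/<name>, otherwise the tlcpackstaging equivalent is
# written instead.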
import os
import logging
import argparse
from git_utils import git, GitHubRepo, parse_remote
from cmd_utils import tags_from_title, init_log
if __name__ == "__main__":
help = "Exits with 0 if CI should be skipped, 1 otherwise"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-title", help="(testing) PR title to use instead of fetching from GitHub"
)
args = parser.parse_args()
init_log()
branch = git(["rev-parse", "--abbrev-ref", "HEAD"])
log = git(["log", "--format=%s", "-1"])
def check_pr_title():
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
if args.pr_title:
title = args.pr_title
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
title = pr["title"]
logging.info(f"pr title: {title}")
tags = tags_from_title(title)
logging.info(f"Found title tags: {tags}")
return "skip ci" in tags
if args.pr != "null" and args.pr.strip() != "" and branch != "main" and check_pr_title():
logging.info("PR title starts with '[skip ci]', skipping...")
exit(0)
else:
logging.info(f"Not skipping CI:\nargs.pr: {args.pr}\nbranch: {branch}\ncommit: {log}")
exit(1)
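# Example invocation (hypothetical script filename and PR number; GITHUB_TOKEN must
# be set unless --pr-title is supplied):
#
#   python git_skip_ci.py --pr 1234 && echo "skip CI" || echo "run CI"
#
# Exit code 0 means the PR title carried a [skip ci] tag on a non-main branch, so the
# calling pipeline may skip the build.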
import argparse
import fnmatch
from typing import Optional
from git_utils import git
globs = [
"*.md",
"conda/*",
".github/*",
".asf.yaml",
".gitignore",
"LICENSE",
"NOTICE",
"KEYS",
"apps/microtvm/poetry.lock",
"apps/microtvm/pyproject.toml",
"tests/lint/*",
"tests/scripts/task_lint.sh",
]
def match_any(f: str) -> Optional[str]:
for glob in globs:
if fnmatch.fnmatch(f, glob):
return glob
return None
if __name__ == "__main__":
help = "Exits with code 1 if a change only touched files, indicating that CI could be skipped for this changeset"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--files", help="(testing only) comma separated list of files to check")
args = parser.parse_args()
print(args)
if args.files is not None:
diff = [x for x in args.files.split(",") if x.strip() != ""]
else:
diff = git(["diff", "--no-commit-id", "--name-only", "-r", "origin/main"])
diff = diff.split("\n")
diff = [d.strip() for d in diff]
diff = [d for d in diff if d != ""]
print(f"Changed files:\n{diff}")
if len(diff) == 0:
print("Found no changed files, skipping CI")
exit(0)
print(f"Checking with globs:\n{globs}")
for file in diff:
match = match_any(file)
if match is None:
print(f"{file} did not match any globs, running CI")
exit(1)
else:
print(f"{file} matched glob {match}")
print("All files matched a glob, skipping CI")
exit(0)
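# A small sketch of how the glob matching behaves (hypothetical file paths):
#
#   >>> match_any("README.md")
#   '*.md'
#   >>> match_any("src/tir/ir.cc") is None
#   True
#
# Only when every changed file matches one of the globs does the script exit 0 (skip CI).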
import json
import subprocess
import re
import os
import base64
import logging
from urllib import request, error
from typing import Dict, Tuple, Any, Optional, List
DRY_RUN = object()
def compress_query(query: str) -> str:
query = query.replace("\n", "")
query = re.sub(r"\s+", " ", query)
return query
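# For illustration (hypothetical query string): compress_query collapses a multi-line
# GraphQL query into a single line before it is sent.
#
#   >>> compress_query("query {\n  viewer { login }\n}")
#   'query { viewer { login }}'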
def post(url: str, body: Optional[Any] = None, auth: Optional[Tuple[str, str]] = None):
logging.info(f"Requesting POST to", url, "with", body)
headers = {}
req = request.Request(url, headers=headers, method="POST")
if auth is not None:
auth_str = base64.b64encode(f"{auth[0]}:{auth[1]}".encode())
req.add_header("Authorization", f"Basic {auth_str.decode()}")
if body is None:
body = ""
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
with request.urlopen(req, data) as response:
return response.read()
def dry_run_token(is_dry_run: bool) -> Any:
if is_dry_run:
return DRY_RUN
return os.environ["GITHUB_TOKEN"]
class GitHubRepo:
GRAPHQL_URL = "https://api.github.com/graphql"
def __init__(self, user, repo, token, test_data=None):
self.token = token
self.user = user
self.repo = repo
self.test_data = test_data
self.num_calls = 0
self.base = f"https:
def headers(self):
return {
"Authorization": f"Bearer {self.token}",
}
def dry_run(self) -> bool:
return self.token == DRY_RUN
def graphql(self, query: str, variables: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
query = compress_query(query)
if variables is None:
variables = {}
response = self._request(
self.GRAPHQL_URL,
{"query": query, "variables": variables},
method="POST",
)
if self.dry_run():
return self.testing_response("POST", self.GRAPHQL_URL)
if "data" not in response:
msg = f"Error fetching data with query:\n{query}\n\nvariables:\n{variables}\n\nerror:\n{json.dumps( |
response, indent=2)}"
raise RuntimeError(msg)
return response
def testing_response(self, method: str, url: str) -> Any:
self.num_calls += 1
key = f"[{self.num_calls}] {method} - {url}"
if self.test_data is not None and key in self.test_data:
return self.test_data[key]
logging.info(f"Unknown URL in dry run: {key}")
return {}
def _request(self, full_url: str, body: Dict[str, Any], method: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a {method} to {full_url} with {body}")
return self.testing_response(method, full_url)
logging.info(f"Requesting {method} to {full_url} with {body}")
req = request.Request(full_url, headers=self.headers(), method=method.upper())
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
try:
with request.urlopen(req, data) as response:
content = response.read()
except error.HTTPError as e:
msg = str(e)
error_data = e.read().decode()
raise RuntimeError(f"Error response: {msg}\n{error_data}")
logging.info(f"Got response from {full_url}: {content}")
try:
response = json.loads(content)
except json.decoder.JSONDecodeError as e:
return content
return response
def put(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PUT")
def patch(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PATCH")
def post(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="POST")
def get(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a GET to {url}")
return self.testing_response("GET", url)
url = self.base + url
logging.info(f"Requesting GET to {url}")
req = request.Request(url, headers=self.headers())
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def delete(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a DELETE to {url}")
return self.testing_response("DELETE", url)
url = self.base + url
logging.info(f"Requesting DELETE to {url}")
req = request.Request(url, headers=self.headers(), method="DELETE")
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def parse_remote(remote: str) -> Tuple[str, str]:
"""
Get a GitHub (user, repo) pair out of a git remote
"""
if remote.startswith("https://"):
parts = remote.split("/")
if len(parts) < 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = parts[-2], parts[-1].replace(".git", "")
else:
m = re.search(r":(.*)/(.*)\.git", remote)
if m is None or len(m.groups()) != 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = m.groups()
user = os.getenv("DEBUG_USER", user)
repo = os.getenv("DEBUG_REPO", repo)
return user, repo
def git(command, **kwargs):
command = ["git"] + command
logging.info(f"Running {command}")
proc = subprocess.run(command, stdout=subprocess.PIPE, encoding="utf-8", **kwargs)
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc.stdout.strip()
def find_ccs(body: str) -> List[str]:
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return list(reviewers)
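# Usage sketch for parse_remote (hypothetical remotes; assumes DEBUG_USER/DEBUG_REPO
# are not set in the environment):
#
#   >>> parse_remote("https://github.com/apache/tvm.git")
#   ('apache', 'tvm')
#   >>> parse_remote("git@github.com:apache/tvm.git")
#   ('apache', 'tvm')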
import sys
import os
import json
import argparse
import re
from urllib import error
from typing import Dict, Any, List
from git_utils import git, GitHubRepo, parse_remote
def find_reviewers(body: str) -> List[str]:
print(f"Parsing body:\n{body}")
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
print("Found matches:", matches)
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return sorted(list(reviewers))
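# A minimal example of the cc parsing (hypothetical PR body; the function also prints
# the body and matches it found along the way):
#
#   find_reviewers("Fixes the build.\n\ncc @alice @bob")  # -> ['alice', 'bob']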
if __name__ == "__main__":
help = "Add @cc'ed people in a PR body as reviewers"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--testing-reviews-json", help="(testing only) reviews as JSON")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
pr = json.loads(os.environ["PR"])
number = pr["number"]
body = pr["body"]
if body is None:
body = ""
new_reviewers = find_reviewers(body)
print("Found these reviewers:", new_reviewers)
if args.testing_reviews_json:
existing_reviews = json.loads(args.testing_reviews_json)
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
existing_reviews = github.get(f"pulls/{number}/reviews")
existing_review_users = [review["user"]["login"] for review in existing_reviews]
print("PR has reviews from these users:", existing_review_users)
existing_review_users = set(r.lower() for r in existing_review_users)
existing_reviewers = [review["login"] for review in pr["requested_reviewers"]]
print("PR already had these reviewers requested:", existing_revi |
ewers)
existing_reviewers_lower = {
existing_reviewer.lower() for existing_reviewer in existing_reviewers
}
to_add = []
for new_reviewer in new_reviewers:
if (
new_reviewer.lower() in existing_reviewers_lower
or new_reviewer.lower() in existing_review_users
):
print(f"{new_reviewer} is already review requested, skipping")
else:
to_add.append(new_reviewer)
print(f"After filtering existing reviewers, adding: {to_add}")
if not args.dry_run:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
for reviewer in to_add:
try:
github.post(f"pulls/{number}/requested_reviewers", {"reviewers": [reviewer]})
except KeyboardInterrupt:
sys.exit()
except (RuntimeError, error.HTTPError) as e:
print(f"Failed to add reviewer {reviewer}: {e}") |
import re
import logging
from typing import Dict, Tuple, Any, Optional, List, Union
from git_utils import GitHubRepo
BOT_COMMENT_START = "<!---bot-comment-->"
WELCOME_TEXT = "Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips."
class BotCommentBuilder:
ALLOWLIST_USERS = {"driazati", "gigiblender", "areusch"}
def __init__(self, github: GitHubRepo, data: Dict[str, Any]):
self.github = github
self.pr_number = data["number"]
self.comment_data = data["comments"]["nodes"]
self.author = data["author"]["login"]
def find_bot_comment(self) -> Optional[Dict[str, Any]]:
"""
Return the existing bot comment or None if it does not exist
"""
for comment in self.comment_data:
logging.info(f"Checking comment {comment}")
if (
comment["author"]["login"] == "github-actions"
and BOT_COMMENT_START in comment["body"]
):
logging.info("Found existing comment")
return comment
logging.info("No existing comment found")
return None
def find_existing_body(self) -> Dict[str, str]:
"""
Find existing dynamic bullet point items
"""
existing_comment = self.find_bot_comment()
if existing_comment is None:
logging.info(f"No existing comment while searching for body items")
return {}
matches = re.findall(
r"<!--bot-comment-([a-z][a-z-]+)-start-->([\S\s]*?)<!--bot-comment-([a-z-]+)-end-->",
existing_comment["body"],
flags=re.MULTILINE,
)
logging.info(f"Fetch body item matches: {matches}")
items = {}
for start, text, end in matches:
if start != end:
raise RuntimeError(
f"Malformed comment found: {start} marker did not have matching end, found instead {end}"
)
items[start] = text.strip().lstrip("* ")
logging.info(f"Found body items: {items}")
return items
def _post_comment(self, body_items: Dict[str, str]):
comment = BOT_COMMENT_START + "\n\n" + WELCOME_TEXT + "\n\n"
for key, content in body_items.items():
line = self.start_key(key) + "\n * " + content.strip() + self.end_key(key)
logging.info(f"Adding line {line}")
comment += line
comment += "\n\n<sub>Generated by [tvm-bot](https:
data = {"body": comment}
url = f"issues/{self.pr_number}/comments"
logging.info(f"Commenting {comment} on {url}")
if self.author not in self.ALLOWLIST_USERS:
logging.info(f"Skipping comment for author {self.author}")
return
existing_comment = self.find_bot_comment()
if existing_comment is None:
r = self.github.post(url, data)
else:
comment_url = f"issues/comments/{existing_comment['databaseId']}"
r = self.github.patch(comment_url, data)
logging.info(f"Got response from posting comment: {r}")
def start_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-start-->"
def end_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-end-->"
def post_items(self, items: List[Tuple[str, str]]):
"""
Update or post bullet points in the PR based on 'items' which is a
list of (key, text) pairs
"""
body_items = self.find_existing_body()
for key, text in items:
if text is None or text.strip() == "":
logging.info(f"Skipping {key} since it was empty")
continue
logging.info(f"Updating comment items {key} with {text}")
body_items[key] = text.strip()
self._post_comment(body_items=body_items)
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Any
def build_docs_url(base_url_docs, pr_number, build_number):
return f"{base_url_docs}/PR-{str(pr_number)}/{str(build_number)}/docs/index.html"
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def get_doc_url(pr: Dict[str, Any], base_docs_url: str = "https://pr-docs.tlcpack.ai") -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
commit_sha = pr_head["oid"]
docs_url = build_docs_url(
base_docs_url, pr_and_build["pr_number"], pr_and_build["build_number"]
)
return f"Built docs for commit {commit_sha} can be found [here]({docs_url})."
import argparse
import os
import json
from git_utils import git, GitHubRepo, parse_remote, DRY_RUN
from cmd_utils import init_log
from github_commenter import BotCommentBuilder
from github_skipped_tests_comment import get_skipped_tests_comment
from github_tag_teams import get_tags
from github_docs_comment import get_doc_url
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
number
author {
login
}
labels(first:100) {
nodes {
name
}
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
author {
login
}
databaseId
body
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
}
}
}
"""
if __name__ == "__main__":
help = "Comment a welcome message on PRs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--pr", required=True)
parser.add_argument("--test-data", help="(testing) mock GitHub API data")
parser.add_argument("--test-comments", help="(testing) testing comments")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
test_data = None
if args.test_data is not None:
test_data = json.loads(args.test_data)
github = GitHubRepo(
user=user,
repo=repo,
token=DRY_RUN if args.dry_run else os.environ["GITHUB_TOKEN"],
test_data=test_data,
)
pr_data = github.graphql(
PR_QUERY,
{
"owner": user,
"name": repo,
"number": int(args.pr),
},
)
pr_data = pr_data["data"]["repository"]["pullRequest"]
commenter = BotCommentBuilder(github=github, data=pr_data)
if args.test_comments is not None:
test_comments = json.loads(args.test_comments)
skipped_tests = test_comments["skipped-tests"]
ccs = test_comments["ccs"]
docs_info = test_comments["docs"]
else:
skipped_tests = get_skipped_tests_comment(pr_data, github=github)
ccs = get_tags(pr_data, github, team_issue=10317)
docs_info = get_doc_url(pr_data)
items = {
"ccs": ccs,
"skipped-tests": skipped_tests,
"docs": docs_info,
}
commenter.post_items(items=items.items())
import inspect
import json
import os
import logging
import subprocess
from xml.etree import ElementTree
from pathlib import Path
from typing import Dict, Any, Optional
def run_subprocess(command):
logging.info(f"Running command {command}")
proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE, encoding="utf-8")
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc
def retrieve_test_report(s3_url, target_dir):
command = f"aws --region us-west-2 s3 cp {s3_url} {target_dir} --recursive --no-sign-request"
run_subprocess(command)
def get_common_commit_sha():
command = "git merge-base origin/main HEAD"
proc = run_subprocess(command)
return proc.stdout.strip()
def get_main_jenkins_build_number(github, common_commit):
json = github.get(f"commits/{common_commit}/status")
for status in reversed(json["statuses"]):
if status["context"] != "tvm-ci/branch":
continue
state = status["state"]
target_url = str(status["target_url"])
build_number = (
target_url[target_url.find("job/main") : len(target_url)]
.strip("job/main/")
.strip("/display/redirect")
)
assert build_number.isdigit()
return {"build_number": build_number, "state": state}
raise RuntimeError(f"Failed to find main build number for commit {common_commit}")
def retrieve_test_reports(
common_main_build, pr_number, build_number, s3_prefix, pr_test_report_dir, main_test_report_dir
):
cur_build_s3_link = (
f"s3:
)
retrieve_test_report(cur_build_s3_link, pr_test_report_dir)
common_build_s3_link = f"s3:
retrieve_test_report(common_build_s3_link, main_test_report_dir)
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def build_test_set(directory):
directory = Path(directory)
subdir_to_skipped = {}
subdirs = [
item for item in os.listdir(directory) if os.path.isdir(os.path.join(directory, item))
]
for subdir in subdirs:
subdir_to_skipped[subdir] = set()
for root, _, files in os.walk(directory / subdir):
for file in files:
test_report = ElementTree.parse(Path(root) / file)
for testcase in test_report.iter("testcase"):
skipped = testcase.find("skipped")
if skipped is not None:
key = testcase.attrib["classname"] + "#" + testcase.attrib["name"]
subdir_to_skipped[subdir].add(key)
return subdir_to_skipped
def to_node_name(dir_name: str):
return dir_name.replace("_", ": ", 1)
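# For illustration (hypothetical report directory name): build_test_set keys each
# skipped test by the report subdirectory it came from, and to_node_name turns that
# directory name back into a CI node label.
#
#   >>> to_node_name("unittest_GPU")
#   'unittest: GPU'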
def build_diff_comment_with_main(
common_commit_sha,
skipped_list,
commit_sha,
):
if len(skipped_list) == 0:
return f"No diff in skipped tests with main found in this branch for commit {commit_sha}.\n"
text = (
f"The list below shows tests that ran in main {common_commit_sha} but were "
f"skipped in the CI build of {commit_sha}:\n"
f"```\n"
)
for skip in skipped_list:
text += skip + "\n"
text += f"```\n"
return text
def build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_list,
pr_number,
build_number,
commit_sha,
jenkins_prefix,
):
if common_main_build["state"] != "success":
return f"Unable to run tests bot because main failed to pass CI at {common_commit_sha}."
text = build_diff_comment_with_main(common_commit_sha, skipped_list, commit_sha)
if len(additional_skipped_list) != 0:
text += "\n"
text += (
f"Additional tests that were skipped in the CI build and present in the [`required_tests_to_run`]"
f"(https:
f"\n```\n"
)
for skip in additional_skipped_list:
text += skip + "\n"
text += f"```\n"
text += (
f"A detailed report of ran tests is [here]( |
https:
f"/{str(build_number)}/testReport/)."
)
return text
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_skipped_tests_comment(
pr: Dict[str, Any],
github,
s3_prefix: str = "tvm-jenkins-artifacts-prod",
jenkins_prefix: str = "ci.tlcpack.ai",
pr_test_report_dir: str = "pr-reports",
main_test_report_dir: str = "main-reports",
common_commit_sha: Optional[str] = None,
common_main_build: Optional[Dict[str, Any]] = None,
additional_tests_to_check_file: str = "required_tests_to_run.json",
) -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
logging.info(f"Getting comment for {pr_head} with target {target_url}")
commit_sha = pr_head["oid"]
is_dry_run = common_commit_sha is not None
if not is_dry_run:
logging.info("Fetching common commit sha and build info")
common_commit_sha = get_common_commit_sha()
common_main_build = get_main_jenkins_build_number(github, common_commit_sha)
retrieve_test_reports(
common_main_build=common_main_build["build_number"],
pr_number=pr_and_build["pr_number"],
build_number=pr_and_build["build_number"],
s3_prefix=s3_prefix,
main_test_report_dir=main_test_report_dir,
pr_test_report_dir=pr_test_report_dir,
)
else:
logging.info("Dry run, expecting PR and main reports on disk")
main_tests = build_test_set(main_test_report_dir)
build_tests = build_test_set(pr_test_report_dir)
skipped_list = []
for subdir, skipped_set in build_tests.items():
skipped_main = main_tests.get(subdir)
if skipped_main is None:
logging.warning(f"Could not find directory {subdir} in main.")
continue
diff_set = skipped_set - skipped_main
if len(diff_set) != 0:
for test in diff_set:
skipped_list.append(f"{to_node_name(subdir)} -> {test}")
skipped_list.sort()
if len(skipped_list) == 0:
logging.info("No skipped tests found.")
if not is_dry_run:
current_file = Path(__file__).resolve()
additional_tests_to_check_file = Path(current_file).parent / "required_tests_to_run.json"
logging.info(
f"Checking additional tests in file {additional_tests_to_check_file} are not skipped."
)
try:
with open(additional_tests_to_check_file, "r") as f:
additional_tests_to_check = json.load(f)
except IOError:
logging.info(
f"Failed to read additional tests from file: {additional_tests_to_check_file}."
)
additional_tests_to_check = {}
additional_skipped_tests = []
for subdir, test_set in additional_tests_to_check.items():
if subdir not in build_tests.keys():
logging.warning(f"Could not find directory {subdir} in the build test set.")
continue
for test in test_set:
if test in build_tests[subdir]:
additional_skipped_tests.append(f"{to_node_name(subdir)} -> {test}")
if len(additional_skipped_tests) == 0:
logging.info("No skipped tests found in the additional list.")
body = build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_tests,
pr_and_build["pr_number"],
pr_and_build["build_number"],
commit_sha,
jenkins_prefix,
)
return body
import os
import json
import argparse
import logging
import re
from typing import Dict, Any, List, Tuple, Optional
from git_utils import git, GitHubRepo, parse_remote, find_ccs, dry_run_token
from cmd_utils import tags_from_title, init_log
GITHUB_NAME_REGEX = r"@[a-zA-Z0-9-]+"
def parse_line(line: str) -> Tuple[str, List[str]]:
line = line.lstrip(" -")
line = line.split()
tag_items = []
tag_end = 0
for i, piece in enumerate(line):
if piece.startswith("@"):
tag_end = i
break
tag_items.append(piece)
tag = " ".join(tag_items).rstrip(":")
users = []
for piece in line[tag_end:]:
if piece.startswith("@"):
users.append(piece.lstrip("@"))
return (tag, list(sorted(users)))
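# A small sketch of the team-issue line format this expects (hypothetical line):
#
#   >>> parse_line("- topi @alice @bob")
#   ('topi', ['alice', 'bob'])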
def fetch_issue(github: GitHubRepo, issue_number: int):
query = """query($owner: String!, $name: String!, $number: Int!){
repository(owner: $owner, name: $name) {
issue(number: $number) {
body
comments(first:100) {
nodes {
body
}
}
}
}
}"""
r = github.graphql(
query,
variables={
"owner": github.user,
"name": github.repo,
"number": issue_number,
},
)
return r
def parse_teams(r: Dict[str, Any], issue_number: int) -> Dict[str, str]:
"""
Fetch an issue and parse out series of tagged people from the issue body
and comments
"""
issue = r["data"]["repository"]["issue"]
if issue is None or issue.get("body") is None:
raise RuntimeError(f"Could not find issue
result = {}
def add_tag(tag, users):
if tag in result:
result[tag] += users
else:
result[tag] = users
for line in issue["body"].split("\n"):
line = line.strip()
if not line.startswith("- "):
continue
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
for comment in issue["comments"]["nodes"]:
for line in comment["body"].split("\n"):
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
for tag in result:
result[tag] = list(set(result[tag]))
return {k.lower(): v for k, v in result.items() if k.strip()}
def tags_from_labels(labels: List[Dict[str, Any]]) -> List[str]:
return [label["name"] for label in labels]
def add_ccs_to_body(body: str, to_cc: List[str]) -> str:
lines = body.split("\n")
cc_line_idx = None
for i, line in enumerate(reversed(lines)):
if line.strip() == "":
continue
if line.startswith("cc @"):
cc_line_idx = len(lines) - i - 1
else:
break
def gen_cc_line(users):
users = sorted(users)
return "cc " + " ".join([f"@{user}" for user in users])
if cc_line_idx is None:
print("Did not find existing cc line")
lines.append("")
lines.append(gen_cc_line(to_cc))
else:
line = lines[cc_line_idx]
print(f"Found existing cc line at {cc_line_idx}: {line}")
existing_ccs = find_ccs(line)
print(f"Found cc's: {existing_ccs}")
if set(to_cc).issubset(set(existing_ccs)):
return None
line = gen_cc_line(set(existing_ccs + to_cc))
lines[cc_line_idx] = line
return "\n".join(lines)
def determine_users_to_cc(
issue: Dict[str, Any], github: GitHubRepo, team_issue: str, issue_data: Optional[Dict[str, Any]]
) -> List[str]:
if issue_data is None:
issue_data = fetch_issue(github, issue_number=int(team_issue))
teams = parse_teams(issue_data, issue_number=int(team_issue))
logging.info(f"Found these teams in issue
title = issue["title"]
if "author" in issue:
author = issue["author"]["login"]
else:
author = issue["user"]["login"]
tags = tags_from_title(title)
if isinstance(issue["labels"], dict):
tags += tags_from_labels(issue["labels"]["nodes"])
else:
tags += tags_from_labels(issue["labels"])
tags = [t.lower() for t in tags]
logging.info(f"Found tags: {tags}")
to_cc = [teams.get(t, []) for t in tags]
to_cc = list(set(item for sublist in to_cc for item in sublist))
to_cc = [user for user in to_cc if user != author]
return to_cc
def get_tags(pr_data: Dict[str, Any], github: GitHubRepo, team_issue: int) -> str:
to_cc = determine_users_to_cc(
issue=pr_data, github=github, team_issue=team_issue, issue_data=None
)
logging.info(f"Users to cc based on labels: {to_cc}")
description = "<sub>See [
if len(to_cc) == 0:
return "No users to tag found in teams " + description
return "cc " + ", ".join([f"@{user}" for user in to_cc]) + " " + description
if __name__ == "__main__":
help = "Automatically tag people based on PR / issue labels"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--team-issue", default="10317", help="issue number to look at for ccs")
parser.add_argument(
"--team-issue-json", help="(testing only) issue JSON to parse rather than fetch from GitHub"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=dry_run_token(args.dry_run), user=user, repo=repo)
if args.team_issue_json:
issue_data = json.loads(args.team_issue_json)
else:
issue_data = fetch_issue(github, issue_number=int(args.team_issue))
issue = json.loads(os.getenv("ISSUE", "null"))
pr = json.loads(os.getenv("PR", "null"))
if (issue is None and pr is None) or (issue is not None and pr is not None):
raise RuntimeError("Exactly one of $PR or $ISSUE must be set in the environment")
if pr is not None:
if pr["draft"]:
print(f"Termi |
nating since {pr['number']} is a draft")
exit(0)
item = issue if issue is not None else pr
title = item["title"]
body = item["body"]
to_cc = determine_users_to_cc(
issue=item, github=github, team_issue=args.team_issue, issue_data=issue_data
)
existing_tags = list(set(re.findall(GITHUB_NAME_REGEX, body)))
existing_tags = set(tag.replace("@", "") for tag in existing_tags)
logging.info(f"Found existing tags: {existing_tags}")
to_cc = [user for user in to_cc if user not in existing_tags]
logging.info("Users to cc based on labels", to_cc)
if len(to_cc) == 0:
logging.info("No one to cc, exiting")
exit(0)
new_body = add_ccs_to_body(body, to_cc)
if new_body is None:
logging.info(f"Everyone to cc is already cc'ed, no update needed")
exit(0)
logging.info(f"Changing body from:\n----\n{body}\n----\nto:\n----\n{new_body}\n----")
data = {"body": new_body}
if issue is not None:
issue_number = issue["number"]
url = f"issues/{issue_number}"
elif pr is not None:
pr_number = pr["number"]
url = f"pulls/{pr_number}"
else:
raise RuntimeError("Unreachable, please report a bug with a link to the failed workflow")
if not args.dry_run:
github.post(url, data=data)
else:
logging.info(f"Dry run, would have updated {url} with {data}") |
import os
import json
import argparse
import warnings
import logging
import traceback
import re
from typing import Dict, Any, List, Optional, Callable, Union
from pathlib import Path
from git_utils import git, GitHubRepo, parse_remote, post
from cmd_utils import init_log
Review = Dict[str, Any]
CIJob = Dict[str, Any]
Comment = Dict[str, Any]
CommentChecker = Callable[[Comment], bool]
EXPECTED_JOBS = ["tvm-ci/pr-head"]
TVM_BOT_JENKINS_TOKEN = os.environ["TVM_BOT_JENKINS_TOKEN"]
GH_ACTIONS_TOKEN = os.environ["GH_ACTIONS_TOKEN"]
JENKINS_URL = "https://ci.tlcpack.ai/"
THANKS_MESSAGE = r"(\s*)Thanks for contributing to TVM! Please refer to guideline https:
def to_json_str(obj: Any) -> str:
return json.dumps(obj, indent=2)
COLLABORATORS_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
collaborators(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
MENTIONABLE_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
mentionableUsers(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
author {
login
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
authorAssociation
author {
login
}
id
updatedAt
body
}
}
authorCommits:commits(last:100) {
nodes {
commit {
authors(first:100) {
nodes {
name
email
}
}
}
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on CheckRun {
name
databaseId
checkSuite {
workflowRun {
databaseId
workflow {
name
}
}
}
status
conclusion
url
}
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
reviewDecision
reviews(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
body
updatedAt
url
id
authorCanPushToRepository
commit {
oid
}
author {
login
}
state
}
}
}
}
}
"""
def walk(obj, visitor, parent_key=None):
"""
Recursively call 'visitor' on all the children of a dictionary
"""
visitor(obj, parent_key)
if isinstance(obj, dict):
for k, v in obj.items():
walk(v, visitor, parent_key=k)
elif isinstance(obj, list):
for v in obj:
walk(v, visitor)
class PR:
def __init__(
self,
number: int,
owner: str,
repo: str,
dry_run: bool = False,
raw_data: Dict[str, Any] = None,
):
self.owner = owner
self.number = number
self.repo_name = repo
self.dry_run = dry_run
self.has_error = False
if dry_run and raw_data:
self.raw = raw_data
self.github = None
else:
self.github = GitHubRepo(user=owner, repo=repo, token=os.environ["GITHUB_TOKEN"])
if os.getenv("DEBUG", "0") == "1":
cached_path = Path("pr.json")
if not cached_path.exists():
self.raw = self.fetch_data()
with open(cached_path, "w") as f:
json.dump(self.raw, f, indent=2)
else:
with open(cached_path) as f:
self.raw = json.load(f)
else:
self.raw = self.fetch_data()
def checker(obj, parent_key):
"""
Verify that any paged results don't have extra data (if so the bot
may still work since most relevant comments will be more recent)
"""
if parent_key == "pageInfo":
if obj.get("hasPreviousPage", False):
warnings.warn(f"Found {obj} with a previous page, bot may be missing data")
if obj.get("hasNextPage", False):
warnings.warn(f"Found {obj} with a next page, bot may be missing data")
walk(self.raw, checker)
logging.info(f"Verified data, running with PR {to_json_str(self.raw)}")
def __repr__(self):
return json.dumps(self.raw, indent=2)
def react(self, comment: Dict[str, Any], content: str):
"""
React with a thumbs up to a comment
"""
url = f"issues/comments/{comment['id']}/reactions"
data = {"content": content}
if self.dry_run:
logging.info(f"Dry run, would have +1'ed to {url} with {data}")
else:
self.github.post(url, data=data)
def head_commit(self):
return self.raw["commits"]["nodes"][0]["commit"]
def co_authors(self) -> List[str]:
authors = []
for commit in self.raw["authorCommits"]["nodes"]:
for author in commit["commit"]["authors"]["nodes"][1:]:
name = author["name"]
email = author["email"]
authors.append(f"{name} <{email}>")
return list(set(authors))
def head_oid(self):
return self.head_commit()["oid"]
def ci_jobs(self) -> List[CIJob]:
"""
Get a list of all CI jobs (GitHub Actions and other) in a unified format
"""
jobs = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item:
status = item["conclusion"]
if status is None:
status = "PENDING"
workflow_name = item["checkSuite"]["workflowRun"]["workflow"]["name"]
if workflow_name != "CI":
continue
check_name = item["name"]
jobs.append(
{
"name": f"{workflow_name} / {check_name}",
"url": item["url"],
"status": status.upper(),
}
)
else:
jobs.append(
{
"name": item["context"],
"url": item["targetUrl"],
"status": item["state"].upper(),
}
)
logging.info(f"Found CI jobs for {self.head_commit()['oid']} {to_json_str(jobs)}")
return jobs
def reviews(self) -> List[Review]:
return self.raw["reviews"]["nodes"]
def head_commit_reviews(self) -> List[Review]:
"""
Find reviews associated with the head commit
"""
commits_to_review_status: Dict[str, List[Review]] = {}
for review in self.reviews():
if not review["authorCanPushToRepository"]:
continue
oid = review["commit"]["oid"]
if oid in commits_to_review_status:
commits_to_review_status[oid].append(review)
else:
commits_to_review_status[oid] = [review]
head_reviews = commits_to_review_status.get(self.head_oid(), [])
return head_reviews
def fetch_data(self):
"""
Fetch the data for this PR from GitHub
"""
return self.github.graphql(
query=PR_QUERY,
variables={
"owner": self.owner,
"name": self.repo_name,
"number": self.number,
},
)["data"]["repository"]["pullRequest"]
def search_collaborator(self, user: str) -> List[Dict[str, Any]]:
"""
Query GitHub for collaborators matching 'user'
"""
return self.search_users(user, COLLABORATORS_QUERY)["collaborators"]["nodes"]
def search_users(self, user: str, query: str) -> List[Dict[str, Any]]:
return self.github.graphql(
query=query,
variables={
"owner": self.owner,
"name": self.repo_name,
"user": user,
},
)["data"]["repository"]
def search_mentionable_users(self, user: str) -> List[Dict[str, Any]]:
return self.search_users(user, MENTIONABLE_QUERY)["mentionableUsers"]["nodes"]
def comment(self, text: str) -> None:
"""
Leave the comment 'text' on this PR
"""
logging.info(f"Commenting:\n{text}")
data = {"body": text}
url = f"issues/{self.number}/comments"
if self.dry_run:
logging.info(
f"Dry run, would have commented on url={url} commenting with data={to_json_str(data)}"
)
return
self.github.post(url, data=data)
def state(self) -> str:
"""
PR state (OPEN, CLOSED, MERGED, etc)
"""
return self.raw["state"]
def processed_body(self) -> str:
body = self.raw["body"].strip().replace("\r", "") |
body = re.sub(r"(\s)@", "\g<1>", body)
body = re.sub(THANKS_MESSAGE, "\n\n", body)
return body.strip()
def body_with_co_authors(self) -> str:
"""
Add 'Co-authored-by' strings to the PR body based on the prior commits
in the PR
"""
body = self.processed_body()
author_lines = self.co_authors()
logging.info(f"Found co-authors: author_lines={author_lines}")
full_author_lines = [f"Co-authored-by: {author_line}" for author_line in author_lines]
authors_to_add = []
for author_line in author_lines:
if author_line not in body:
authors_to_add.append(f"Co-authored-by: {author_line}")
if len(authors_to_add) > 0:
full_author_text = "\n".join(authors_to_add)
body = f"{body}\n\n{full_author_text}"
return body
def merge(self) -> None:
"""
Request a merge of this PR via the GitHub API
"""
url = f"pulls/{self.number}/merge"
title = self.raw["title"] + f" (
body = self.body_with_co_authors()
logging.info(f"Full commit:\n{title}\n\n{body}")
data = {
"commit_title": title,
"commit_message": body,
"sha": self.head_oid(),
"merge_method": "squash",
}
if self.dry_run:
logging.info(f"Dry run, would have merged with url={url} and data={to_json_str(data)}")
return
r = self.github.put(url, data=data)
logging.info(f"GitHub merge response: {r}")
return r
def author(self) -> str:
return self.raw["author"]["login"]
def find_failed_ci_jobs(self) -> List[CIJob]:
return [
job
for job in self.ci_jobs()
if job["status"] not in {"SUCCESS", "SUCCESSFUL", "SKIPPED"}
]
def find_missing_expected_jobs(self) -> List[str]:
seen_expected_jobs = {name: False for name in EXPECTED_JOBS}
logging.info(f"Expected to see jobs: {seen_expected_jobs}")
missing_expected_jobs = []
for job in self.ci_jobs():
seen_expected_jobs[job["name"]] = True
for name, seen in seen_expected_jobs.items():
if not seen:
missing_expected_jobs.append(name)
return missing_expected_jobs
def trigger_gha_ci(self, sha: str) -> None:
logging.info(f"POST-ing a workflow_dispatch event to main.yml")
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
r = actions_github.post(
url="actions/workflows/main.yml/dispatches",
data={
"ref": "main",
},
)
logging.info(f"Successful workflow_dispatch: {r}")
def merge_if_passed_checks(self) -> Optional[Dict[str, Any]]:
failed_ci_jobs = self.find_failed_ci_jobs()
all_ci_passed = len(failed_ci_jobs) == 0
has_one_approval = False
if not all_ci_passed:
failed_jobs_msg = "\n".join(
[f" * [{job['name']} (`{job['status']}`)]({job['url']})" for job in failed_ci_jobs]
)
self.comment(
f"Cannot merge, these CI jobs are not successful on {self.head_oid()}:\n{failed_jobs_msg}"
)
return None
missing_expected_jobs = self.find_missing_expected_jobs()
if len(missing_expected_jobs) > 0:
missing_jobs_msg = "\n".join([f" * `{name}`" for name in missing_expected_jobs])
self.comment(f"Cannot merge, missing expected jobs:\n{missing_jobs_msg}")
return None
head_commit_reviews = self.head_commit_reviews()
for review in head_commit_reviews:
if review["state"] == "CHANGES_REQUESTED":
self.comment(
f"Cannot merge, found [this review]({review['url']}) on {self.head_oid()} with changes requ |
ested"
)
return None
if review["state"] == "APPROVED":
has_one_approval = True
logging.info(f"Found approving review: {to_json_str(review)}")
if has_one_approval and all_ci_passed:
return self.merge()
elif not has_one_approval:
self.comment(
f"Cannot merge, did not find any approving reviews from users with write access on {self.head_oid()}"
)
return None
elif not all_ci_passed:
self.comment(f"Cannot merge, CI did not pass on on {self.head_oid()}")
return None
def rerun_jenkins_ci(self) -> None:
url = JENKINS_URL + f"job/tvm/job/PR-{self.number}/buildWithParameters"
logging.info(f"Rerunning ci with URL={url}")
if self.dry_run:
logging.info("Dry run, not sending POST")
else:
post(url, auth=("tvm-bot", TVM_BOT_JENKINS_TOKEN))
def rerun_github_actions(self) -> None:
workflow_ids = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item and item["conclusion"] == "FAILURE":
workflow_id = item["checkSuite"]["workflowRun"]["databaseId"]
workflow_ids.append(workflow_id)
workflow_ids = list(set(workflow_ids))
logging.info(f"Rerunning GitHub Actions workflows with IDs: {workflow_ids}")
if self.dry_run:
actions_github = None
else:
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
for workflow_id in workflow_ids:
if self.dry_run:
logging.info(f"Dry run, not restarting workflow {workflow_id}")
else:
try:
actions_github.post(f"actions/runs/{workflow_id}/rerun-failed-jobs", data={})
except RuntimeError as e:
logging.exception(e)