Datasets:

Modalities:
Text
Formats:
json
Languages:
English
Libraries:
Datasets
pandas
License:
json-schema / fetch_history.py
michaelmior's picture
Add script for fetching historical versions
7b8ecc4 verified
raw
history blame
1.73 kB
import csv
import json
import os
import sys
import requests
import requests_ratelimiter
import tqdm
def get_commits(session, repo, path):
    """Return the set of commit SHAs touching *path* in the GitHub repo *repo*.

    Args:
        session: a requests-compatible session (expected to be rate-limited).
        repo: "owner/name" slug of the repository.
        path: file path within the repository to get the commit history for.

    Returns:
        A set of commit hash strings; empty on any request or API error.

    Requires the GITHUB_TOKEN environment variable to be set.
    """
    query = {"path": path}
    headers = {
        "Accept": "application/vnd.github+json",
        "Authorization": "Bearer " + os.environ["GITHUB_TOKEN"],
        "X-GitHub-Api-Version": "2022-11-28",
    }
    try:
        r = session.get(
            "https://api.github.com/repos/" + repo + "/commits",
            params=query,
            headers=headers,
            timeout=10,
        )
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.ReadTimeout,
    ):
        # Skip on request error
        return set()
    else:
        # On an error status (e.g. 403 rate limit, 404) the API returns a
        # JSON *object*, not a list, so iterating it for "sha" would raise
        # TypeError. Treat any non-2xx/3xx response as "no commits".
        if not r.ok:
            return set()
        # Get the commit hashes
        return set(c["sha"] for c in r.json())
def main():
    """Fetch commit histories for every repository listed in repos.csv.

    Reads "repository"/"path" pairs from repos.csv and writes one JSON
    object per row to stdout (JSON Lines), each containing the repo slug,
    the file path, and the list of commit hashes for that path.
    """
    # Initialize a new session, rate-limited to 2 requests/second so we
    # stay within the GitHub API's limits
    session = requests.Session()
    adapter = requests_ratelimiter.LimiterAdapter(per_second=2)
    session.mount("http://", adapter)
    session.mount("https://", adapter)

    # newline="" is required by the csv module for correct handling of
    # quoted fields that contain embedded newlines
    with open("repos.csv", "r", newline="") as csvfile:
        # Count number of rows (for the progress bar total) and reset
        reader = csv.DictReader(csvfile)
        rows = sum(1 for row in reader)
        csvfile.seek(0)
        reader = csv.DictReader(csvfile)
        for row in tqdm.tqdm(reader, total=rows):
            # Remove github.com/ from the beginning and fetch commits
            repo = row["repository"].split("/", maxsplit=1)[1]
            commits = get_commits(session, repo, row["path"])
            # Write the collected commits as one JSON object per line
            obj = {"repository": repo, "path": row["path"], "commits": list(commits)}
            json.dump(obj, sys.stdout)
            sys.stdout.write("\n")


if __name__ == "__main__":
    main()