repo
stringclasses 358
values | pull_number
int64 6
67.9k
| instance_id
stringlengths 12
49
| issue_numbers
sequencelengths 1
7
| base_commit
stringlengths 40
40
| patch
stringlengths 87
101M
| test_patch
stringlengths 72
22.3M
| problem_statement
stringlengths 3
256k
| hints_text
stringlengths 0
545k
| created_at
stringlengths 20
20
| PASS_TO_PASS
sequencelengths 0
0
| FAIL_TO_PASS
sequencelengths 0
0
|
---|---|---|---|---|---|---|---|---|---|---|---|
chaoss/augur | 46 | chaoss__augur-46 | [
"41"
] | c417d3a13aa33099f6ca6af2db18bf18217b144b | diff --git a/ghdata/ghtorrent.py b/ghdata/ghtorrent.py
--- a/ghdata/ghtorrent.py
+++ b/ghdata/ghtorrent.py
@@ -1,8 +1,6 @@
#SPDX-License-Identifier: MIT
import pandas as pd
import sqlalchemy as s
-import sys
-import json
import re
class GHTorrent(object):
@@ -16,6 +14,10 @@ def __init__(self, dbstr):
"""
self.DB_STR = dbstr
self.db = s.create_engine(dbstr)
+ try:
+ self.userid('howderek')
+ except Exception as e:
+ print("Could not connect to database.\nError: " + str(e))
def __single_table_count_by_date(self, table, repo_col='project_id'):
"""
@@ -327,3 +329,38 @@ def pull_acceptance_rate(self, repoid):
""")
return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)})
+
+ def classify_contributors(self, repoid):
+ """
+ Classify everyone who has interacted with a repo into
+ - user
+ - tester
+ - rejected_contributor
+ - contributor
+ - major_contributor
+ - maintainer
+
+ :param repoid: The id of the project in the projects table.
+ :return: DataFrame with the login and role of contributors
+ """
+ contributors = self.contributors(repoid)
+ sums = contributors.sum()
+
+ def classify(row):
+ role = 'user'
+ ratio = row / sums
+ if (ratio['issue_comments'] > 0.05):
+ role = 'tester'
+ if (row['pull_requests'] >= 1 and row['commits'] == 0):
+ role = 'rejected_contributor'
+ if (row['pull_requests'] >= 1 and row['commits'] >= 1):
+ role = 'contributor'
+ if (ratio['pull_requests'] > 0.10 or ratio['commits'] > 0.01):
+ role = 'major_contributor'
+ if (ratio['commits'] > 0.02 or ratio['pull_request_comments'] > 0.15):
+ role = 'maintainer'
+
+ return pd.Series({'login': row['login'], 'role': role})
+
+ roles = contributors.apply(classify, axis=1)
+ return roles
diff --git a/ghdata/githubapi.py b/ghdata/githubapi.py
--- a/ghdata/githubapi.py
+++ b/ghdata/githubapi.py
@@ -1,4 +1,8 @@
+
+import datetime
+from dateutil.parser import parse
import pandas as pd
+import github
class GitHubAPI(object):
@@ -6,37 +10,142 @@ class GitHubAPI(object):
GitHubAPI is a class for getting metrics from the GitHub API
"""
def __init__(self, api_key):
- """
- Creates a new GitHub instance
+ """
+ Creates a new GitHub instance
- :param api_key: GitHub API key
- """
- import github
- self.GITUB_API_KEY = api_key
- self.__api = github.Github(api_key)
+ :param api_key: GitHub API key
+ """
+ self.GITUB_API_KEY = api_key
+ self.__api = github.Github(api_key)
- def contributions_by_file(self, owner, repo, start=None, end=None):
+ def contributions_by_file(self, owner, repo, filename=None, start=None, end=None, ascending=False):
"""
Gets number of addtions and deletions in each file by user
- Puts it in dataframe with columns:
- file user num of additions num of deletion total changes
-
Currently ignores changes from local users unattributed to Github users
- WORK IN PROGRESS
-
+ :param owner: repo owner username
+ :param repo: repo name
+ :param filename: optional; file or directory for function to run on
+ :param start: optional; start time for analysis
+ :param end: optional; end time for analysis
+ :param ascending: Default False; returns dataframe in ascending order
"""
+ if start != None:
+ start = parse(start)
+ else:
+ start = github.GithubObject.NotSet
+
+ if end != None:
+ end = parse(end)
+ else:
+ end = github.GithubObject.NotSet
+
+ commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end)
+
+ if filename != None:
+ self.__api.get_repo((owner + "/" + repo)).get_contents(filename)
+
df = []
- for commit in self.__api.get_repo((owner + "/" + repo)).get_commits(since=start,until=end):
+
+ for commit in commits:
for file in commit.files:
+ if filename != None:
+ try:
+ if file.changes != 0 and file.filename == filename:
+ df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes})
+ except AttributeError:
+ pass
+ else:
+ try:
+ if file.changes != 0:
+ df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes})
+ except AttributeError:
+ pass
+
+ df = pd.DataFrame(df)
+
+ df = df.groupby(["file", "user"]).sum()
+
+ df = df.sort_values(ascending=ascending)
+
+ return df
+
+ def contributions_by_percentage(self, owner, repo, filename=None, start=None, end=None, ascending=False):
+ """
+ Calculates percentage of commits in repo by user
+
+ Puts it in dataframe with columns:
+ user percentage of commits
+
+ Currently ignores changes from local users unattributed to Github user
+
+ :param owner: repo owner username
+ :param repo: repo name
+ :param filename: optional; file or directory for function to run on
+ :param start: optional; start time for analysis
+ :param end: optional; end time for analysis
+ :param ascending: Default False; returns dataframe in ascending order
+ """
+ if start != None:
+ start = parse(start)
+ else:
+ start = github.GithubObject.NotSet
+
+ if end != None:
+ end = parse(end)
+ else:
+ end = github.GithubObject.NotSet
+
+ commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end)
+
+ if filename != None:
+ self.__api.get_repo((owner + "/" + repo)).get_contents(filename)
+
+ df = []
+
+ if filename != None:
+ for commit in commits:
+ for file in commit.files:
+ if file.filename == filename:
+ try:
+ df.append({'user': commit.author.login})
+ except AttributeError:
+ pass
+ break
+ else:
+ for commit in commits:
try:
- df.append({'user': commit.author.login, 'file': file.filename, 'additions': file.additions, 'deletions': file.deletions, 'total': file.changes})
+ df.append({'user': commit.author.login})
except AttributeError:
pass
df = pd.DataFrame(df)
- df.groupby(["file" ,"user"]).sum()
+ df = df.groupby(['user']).user.count() / df.groupby(['user']).user.count().sum() * 100
+
+ df = df.sort_values(ascending=ascending)
return df
+
+ def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50, best=False):
+ """
+ Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold
+
+ :param owner: repo owner username
+ :param repo: repo name
+ :param filename: optional; file or directory for function to run on
+ :param start: optional; start time for analysis
+ :param end: optional; end time for analysis
+ :param threshold: Default 50;
+ :param best: Default False; If true, sums from lowest to highestn
+ """
+
+ df = self.contributions_by_percentage(owner, repo, filename, start, end, best)
+
+ i = 0
+ for num in df.cumsum():
+ i = i + 1
+ if num >= threshold:
+ bus_factor = pd.Series(i, index=["Bus Factor"])
+ return bus_factor
diff --git a/ghdata/publicwww.py b/ghdata/publicwww.py
--- a/ghdata/publicwww.py
+++ b/ghdata/publicwww.py
@@ -1,10 +1,13 @@
-import pandas as pd
+"""
+PublicWWW is a class for making API requests to https://publicwww.com/ a
+search engine for the source of websites
+"""
import sys
-if (sys.version_info > (3, 0)):
+import pandas as pd
+if sys.version_info > (3, 0):
import urllib.parse as url
else:
import urllib as url
-import requests
class PublicWWW(object):
@@ -13,13 +16,13 @@ class PublicWWW(object):
search engine for the source of websites
"""
- def __init__(self, public_www_api_key):
+ def __init__(self, api_key):
"""
Initalizes a PublicWWW instance
- :param public_www_api_key: The API key for PublicWWW. This is required to get the full names of more results
+ :param api_key: The API key for PublicWWW. This is required to get the full names of more results
"""
- self.PUBLIC_WWW_API_KEY = public_www_api_key
+ self.__api_key = api_key
def linking_websites(self, owner, repo):
"""
@@ -32,8 +35,9 @@ def linking_websites(self, owner, repo):
"""
# Find websites that link to that repo
- repo_url="https://github.com/{owner}/{repo}".format(owner=owner, repo=repo)
+ repo_url = "https://github.com/{owner}/{repo}".format(owner=owner, repo=repo)
query = '<a+href%3D"{repourl}"'.format(repourl=url.quote_plus(repo_url))
- r = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}'.format(query=query, apikey=self.PUBLIC_WWW_API_KEY)
- result = pd.read_csv(r, delimiter=';', header=None, names=['url', 'rank'])
- return result
\ No newline at end of file
+ req = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}'
+ req.format(query=query, apikey=self.__api_key)
+ result = pd.read_csv(req, delimiter=';', header=None, names=['url', 'rank'])
+ return result
diff --git a/ghdata/server.py b/ghdata/server.py
--- a/ghdata/server.py
+++ b/ghdata/server.py
@@ -1,33 +1,30 @@
#SPDX-License-Identifier: MIT
+import ghdata
-from flask import Flask, request, Response, json, send_from_directory
-from flask_cors import CORS, cross_origin
import os
import sys
-import datetime
if (sys.version_info > (3, 0)):
import configparser as configparser
else:
import ConfigParser as configparser
-from dateutil import parser, tz
-import ghdata
+from flask import Flask, request, Response, send_from_directory
+from flask_cors import CORS
GHDATA_API_VERSION = 'unstable'
-
def serialize(func, **args):
"""
Serailizes a function that returns a dataframe
"""
data = func(**args)
- if (hasattr(data, 'to_json')):
+ if hasattr(data, 'to_json'):
return data.to_json(orient='records', date_format='iso', date_unit='ms')
else:
return data
-def flaskify_ghtorrent(flaskapp, func):
+def flaskify_ghtorrent(ghtorrent, func):
"""
Simplifies API endpoints that just accept owner and repo,
serializes them and spits them out
@@ -35,404 +32,446 @@ def flaskify_ghtorrent(flaskapp, func):
def generated_function(owner, repo):
repoid = ghtorrent.repoid(owner=owner, repo=repo)
return Response(response=serialize(func, repoid=repoid),
- status=200,
- mimetype="application/json")
+ status=200,
+ mimetype="application/json")
generated_function.__name__ = func.__name__
return generated_function
-def flaskify(flaskapp, func):
+def flaskify(func):
"""
Simplifies API endpoints that just accept owner and repo,
serializes them and spits them out
"""
def generated_function(owner, repo):
return Response(response=serialize(func, owner=owner, repo=repo),
- status=200,
- mimetype="application/json")
+ status=200,
+ mimetype="application/json")
generated_function.__name__ = func.__name__
return generated_function
+def read_config(parser, section, name, environment_variable, default):
+ try:
+ value = os.getenv(environment_variable, parser.get(section, name))
+ return value
+ except:
+ if not parser.has_section(section):
+ parser.add_section(section)
+ parser.set(section, name, default)
+ with open('ghdata.cfg', 'w') as configfile:
+ parser.write(configfile)
+ return default
+
-app = Flask(__name__, static_url_path=os.path.abspath('static/'))
-CORS(app)
-# Flags and Initialization
+def run():
-"""Reads the config file"""
-try:
+ app = Flask(__name__)
+ CORS(app)
# Try to open the config file and parse it
parser = configparser.RawConfigParser()
parser.read('ghdata.cfg')
- host = parser.get('Server', 'host')
- port = parser.get('Server', 'port')
+
try:
- dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(parser.get('Database', 'user'), parser.get('Database', 'pass'), parser.get('Database', 'host'), parser.get('Database', 'port'), parser.get('Database', 'name'))
+ dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
+ read_config(parser, 'Database', 'user', 'GHDATA_DB_USER', 'root'),
+ read_config(parser, 'Database', 'pass', 'GHDATA_DB_PASS', 'password'),
+ read_config(parser, 'Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'),
+ read_config(parser, 'Database', 'port', 'GHDATA_DB_PORT', '3306'),
+ read_config(parser, 'Database', 'name', 'GHDATA_DB_NAME', 'msr14')
+ )
+ print("Connecting with " + dbstr)
ghtorrent = ghdata.GHTorrent(dbstr=dbstr)
except Exception as e:
print("Failed to connect to database (" + str(e) + ")");
- publicwww = ghdata.PublicWWW(public_www_api_key=parser.get('PublicWWW', 'APIKey'))
- if (parser.get('Development', 'developer') == '1' or os.getenv('FLASK_DEBUG') == '1'):
- DEBUG = True
- else:
- DEBUG = False
-
-except Exception as e:
- # Uh-oh. Save a new config file.
- print('Failed to open config file.')
- print('Error: ' + str(e))
- config = configparser.RawConfigParser()
- config.add_section('Server')
- config.set('Server', 'host', '0.0.0.0')
- config.set('Server', 'port', '5000')
- config.add_section('Database')
- config.set('Database', 'host', '127.0.0.1')
- config.set('Database', 'port', '3306')
- config.set('Database', 'user', 'root')
- config.set('Database', 'pass', 'root')
- config.set('Database', 'name', 'ghtorrent')
- config.add_section('PublicWWW')
- config.set('PublicWWW', 'APIKey', '0')
- config.add_section('Development')
- config.set('Development', 'developer', '0')
- # Writing our configuration file to 'example.cfg'
- with open('ghdata.cfg', 'w') as configfile:
- config.write(configfile)
- print('Default config saved to ghdata.cfg')
- sys.exit()
-
-
-
-"""
-@api {get} / API Status
-@apiName Status
-@apiGroup Misc
-"""
[email protected]('/{}/'.format(GHDATA_API_VERSION))
-def api_root():
- """API status"""
- # @todo: When we support multiple data sources this should keep track of their status
- return """{"status": "healthy", "ghtorrent": "online"}"""
-
-#######################
-# Timeseries #
-#######################
-
-# @todo: Link to LF Metrics
-
-"""
-@api {get} /:owner/:repo/commits Commits by Week
-@apiName CommitsByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "commits": 153
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "commits": 192
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.commits))
-
-"""
-@api {get} /:owner/:repo/forks Forks by Week
-@apiName ForksByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "forks": 13
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "forks": 12
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.forks))
-
-"""
-@api {get} /:owner/:repo/issues Issues by Week
-@apiName IssuesByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "issues":13
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "issues":15
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.issues))
-
-"""
-@api {get} /:owner/:repo/issues/response_time Issue Response Time
-@apiName IssueResponseTime
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "created_at": "2013-09-16T17:00:54.000Z",
- "responded_at": "2013-09-16T17:20:58.000Z"
- },
- {
- "created_at": "2013-09-16T09:31:34.000Z",
- "responded_at": "2013-09-16T09:43:03.000Z"
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.issue_response_time))
-
-"""
-@api {get} /:owner/:repo/pulls Pull Requests by Week
-@apiName PullRequestsByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "pull_requests": 1
- "comments": 11
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "pull_requests": 2
- "comments": 31
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.pulls))
-
-"""
-@api {get} /:owner/:repo/stargazers Stargazers by Week
-@apiName StargazersByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "watchers": 133
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "watchers": 54
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.stargazers))
-
-"""
-@api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week
-@apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week)
-@apiName Stargazers
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "rate": 0.5
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "rate": 0.33
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.pull_acceptance_rate))
-
-# Contribution Trends
-"""
-@api {get} /:owner/:repo/contributors Total Contributions by User
-@apiName TotalContributions
-@apiGroup Users
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "login": "foo",
- "location": "Springfield",
- "commits": 1337.0,
- "pull_requests": 60.0,
- "issues": null,
- "commit_comments": 158.0,
- "pull_request_comments": 718.0,
- "issue_comments": 1668.0
- },
- {
- "login": "bar",
- "location": null,
- "commits": 3968.0,
- "pull_requests": null,
- "issues": 12.0,
- "commit_comments": 158.0,
- "pull_request_comments": 718.0,
- "issue_comments": 1568.0
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.contributors))
-
-#######################
-# Contribution Trends #
-#######################
-
-"""
-@api {get} /:owner/:repo/contributions Contributions by Week
-@apiName ContributionsByWeek
-@apiGroup Timeseries
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-@apiParam (String) user Limit results to the given user's contributions
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "commits": 37.0,
- "pull_requests": null,
- "issues": null,
- "commit_comments": 7.0,
- "pull_request_comments": 8.0,
- "issue_comments": 17.0
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "commits": 68.0,
- "pull_requests": null,
- "issues": 12.0,
- "commit_comments": 18.0,
- "pull_request_comments": 13.0,
- "issue_comments": 28.0
- }
- ]
-"""
[email protected]('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION))
-def contributions(owner, repo):
- repoid = ghtorrent.repoid(owner=owner, repo=repo)
- user = request.args.get('user')
- if (user):
- userid = ghtorrent.userid(username=user)
- contribs = ghtorrent.contributions(repoid=repoid, userid=userid)
+
+ host = read_config(parser, 'Server', 'host', 'GHDATA_HOST', '0.0.0.0')
+ port = read_config(parser, 'Server', 'port', 'GHDATA_PORT', '5000')
+
+ publicwww = ghdata.PublicWWW(api_key=read_config(parser, 'PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None'))
+ github = ghdata.GitHubAPI(api_key=read_config(parser, 'GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None'))
+
+ if (read_config(parser, 'Development', 'developer', 'GHDATA_DEBUG', '0') == '1'):
+ debugmode = True
else:
- contribs = ghtorrent.contributions(repoid=repoid)
- return Response(response=contribs,
- status=200,
- mimetype="application/json")
-
-# Diversity
-
-"""
-@api {get} /:owner/:repo/commits/locations Commits and Location by User
-@apiName Stargazers
-@apiGroup Diversity
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "login": "bonnie",
- "location": "Rowena, TX",
- "commits": 12
- },
- {
- "login":"clyde",
- "location":"Ellis County, TX",
- "commits": 12
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.committer_locations))
-
-# Popularity
-"""
-@api {get} /:owner/:repo/linking_websites Linking Websites
-@apiDescription Returns an array of websites and their rank according to http://publicwww.com/
-@apiName LinkingWebsites
-@apiGroup Popularity
-
-@apiParam {String} owner Username of the owner of the GitHub repository
-@apiParam {String} repo Name of the GitHub repository
-
-@apiSuccessExample {json} Success-Response:
- [
- {
- "url": "missouri.edu",
- "rank": "1"
- },
- {
- "url": "unomaha.edu",
- "rank": "2"
- }
- ]
-"""
-app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(app, publicwww.linking_websites))
-
-
-if (DEBUG):
- print(" * Serving static routes")
- # Serve the front-end files in debug mode to make it easier for developers to work on the interface
- # @todo: Figure out why this isn't working.
- @app.route('/')
- def index():
- root_dir = os.path.dirname(os.getcwd())
- print(root_dir + '/ghdata/static')
- return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html')
-
- @app.route('/scripts/<path>')
- def send_scripts(path):
- root_dir = os.path.dirname(os.getcwd())
- return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path)
-
- @app.route('/styles/<path>')
- def send_styles(path):
- root_dir = os.path.dirname(os.getcwd())
- return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path)
-
- app.debug = True
+ debugmode = False
-def run():
- app.run(host=host, port=int(port), debug=DEBUG)
+
+
+ """
+ @api {get} / API Status
+ @apiName Status
+ @apiGroup Misc
+ """
+ @app.route('/{}/'.format(GHDATA_API_VERSION))
+ def api_root():
+ """API status"""
+ # @todo: When we support multiple data sources this should keep track of their status
+ # @todo: Add GHTorrent test to determine status
+ ghtorrent_status = "good"
+ # @todo: Add GitHub API status
+ # @todo: Add PublicWWW API status
+ return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status)
+
+ #######################
+ # Timeseries #
+ #######################
+
+ # @todo: Link to LF Metrics
+
+ """
+ @api {get} /:owner/:repo/commits Commits by Week
+ @apiName CommitsByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "commits": 153
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "commits": 192
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.commits))
+
+ """
+ @api {get} /:owner/:repo/forks Forks by Week
+ @apiName ForksByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "forks": 13
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "forks": 12
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.forks))
+
+ """
+ @api {get} /:owner/:repo/issues Issues by Week
+ @apiName IssuesByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "issues":13
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "issues":15
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.issues))
+
+ """
+ @api {get} /:owner/:repo/issues/response_time Issue Response Time
+ @apiName IssueResponseTime
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "created_at": "2013-09-16T17:00:54.000Z",
+ "responded_at": "2013-09-16T17:20:58.000Z"
+ },
+ {
+ "created_at": "2013-09-16T09:31:34.000Z",
+ "responded_at": "2013-09-16T09:43:03.000Z"
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.issue_response_time))
+
+ """
+ @api {get} /:owner/:repo/pulls Pull Requests by Week
+ @apiName PullRequestsByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "pull_requests": 1
+ "comments": 11
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "pull_requests": 2
+ "comments": 31
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.pulls))
+
+ """
+ @api {get} /:owner/:repo/stargazers Stargazers by Week
+ @apiName StargazersByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "watchers": 133
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "watchers": 54
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.stargazers))
+
+ """
+ @api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week
+ @apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week)
+ @apiName Stargazers
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "rate": 0.5
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "rate": 0.33
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.pull_acceptance_rate))
+
+ # Contribution Trends
+ """
+ @api {get} /:owner/:repo/contributors Total Contributions by User
+ @apiName TotalContributions
+ @apiGroup Users
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "foo",
+ "location": "Springfield",
+ "commits": 1337.0,
+ "pull_requests": 60.0,
+ "issues": null,
+ "commit_comments": 158.0,
+ "pull_request_comments": 718.0,
+ "issue_comments": 1668.0
+ },
+ {
+ "login": "bar",
+ "location": null,
+ "commits": 3968.0,
+ "pull_requests": null,
+ "issues": 12.0,
+ "commit_comments": 158.0,
+ "pull_request_comments": 718.0,
+ "issue_comments": 1568.0
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.contributors))
+
+ #######################
+ # Contribution Trends #
+ #######################
+
+ """
+ @api {get} /:owner/:repo/contributions Contributions by Week
+ @apiName ContributionsByWeek
+ @apiGroup Timeseries
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+ @apiParam (String) user Limit results to the given user's contributions
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "commits": 37.0,
+ "pull_requests": null,
+ "issues": null,
+ "commit_comments": 7.0,
+ "pull_request_comments": 8.0,
+ "issue_comments": 17.0
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "commits": 68.0,
+ "pull_requests": null,
+ "issues": 12.0,
+ "commit_comments": 18.0,
+ "pull_request_comments": 13.0,
+ "issue_comments": 28.0
+ }
+ ]
+ """
+ @app.route('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION))
+ def contributions(owner, repo):
+ repoid = ghtorrent.repoid(owner=owner, repo=repo)
+ user = request.args.get('user')
+ if (user):
+ userid = ghtorrent.userid(username=user)
+ contribs = ghtorrent.contributions(repoid=repoid, userid=userid)
+ else:
+ contribs = ghtorrent.contributions(repoid=repoid)
+ return Response(response=contribs,
+ status=200,
+ mimetype="application/json")
+
+ # Diversity
+
+ """
+ @api {get} /:owner/:repo/commits/locations Commits and Location by User
+ @apiName Stargazers
+ @apiGroup Diversity
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "bonnie",
+ "location": "Rowena, TX",
+ "commits": 12
+ },
+ {
+ "login":"clyde",
+ "location":"Ellis County, TX",
+ "commits": 12
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))(
+ flaskify_ghtorrent(ghtorrent, ghtorrent.committer_locations))
+
+ # Popularity
+ """
+ @api {get} /:owner/:repo/linking_websites Linking Websites
+ @apiDescription Returns an array of websites and their rank according to http://publicwww.com/
+ @apiName LinkingWebsites
+ @apiGroup Popularity
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "url": "missouri.edu",
+ "rank": "1"
+ },
+ {
+ "url": "unomaha.edu",
+ "rank": "2"
+ }
+ ]
+ """
+ app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(publicwww.linking_websites))
+
+ #######################
+ # GitHub API #
+ #######################
+
+ """
+ @api {get} /:owner/:repo/bus_factor Bus Factor
+ @apiDescription Returns an integer that is the number of develpers that have a summed percentage of contributions higher than the threshold
+ @apiName GitHub
+ @apiGroup Users
+
+ @apiParam {String} owner Username of the owner of the GitHub repository
+ @apiParam {String} repo Name of the GitHub repository
+ @apiParam {String} filename: optional; file or directory for function to run on
+ @apiParam {String} start: optional; start time for analysis
+ @apiParam {String} end: optional; end time for analysis
+ @apiParam {String} threshold: Default 50;
+ @apiParam {String} best: Default False; If true, sums from lowest to highest
+
+ @apiSuccessExample {json} Success-Response:
+ [
+ {
+ "repo": "ghdata",
+ "bus_factor": "2"
+ }
+ ]
+ """
+ @app.route('/{}/<owner>/<repo>/bus_factor'.format(GHDATA_API_VERSION))
+ def bus_factor(owner,repo):
+ kwargs = request.args.to_dict()
+ return Response(response=github.bus_factor(owner, repo, **kwargs).to_json(), status=200, mimetype="application/json")
+
+
+
+ if (debugmode):
+ print(" * Serving static routes")
+ # Serve the front-end files in debug mode to make it easier for developers to work on the interface
+ # @todo: Figure out why this isn't working.
+ @app.route('/')
+ def index():
+ root_dir = os.path.dirname(os.getcwd())
+ print(root_dir + '/ghdata/static')
+ return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html')
+
+ @app.route('/scripts/<path>')
+ def send_scripts(path):
+ root_dir = os.path.dirname(os.getcwd())
+ return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path)
+
+ @app.route('/styles/<path>')
+ def send_styles(path):
+ root_dir = os.path.dirname(os.getcwd())
+ return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path)
+
+ app.debug = True
+
+ app.run(host=host, port=int(port), debug=debugmode)
if __name__ == '__main__':
- run()
\ No newline at end of file
+ run()
| diff --git a/test/test_publicwww.py b/test/test_publicwww.py
--- a/test/test_publicwww.py
+++ b/test/test_publicwww.py
@@ -7,7 +7,7 @@ def publicwww():
import ghdata
key = os.getenv("PUBLIC_WWW_TEST_API_KEY")
assert key is not None and len(key) >= 1
- return ghdata.PublicWWW(public_www_api_key=key)
+ return ghdata.PublicWWW(key)
def test_linking_websites(publicwww):
assert publicwww.linking_websites(owner='yihui', repo='knitr').isin(["sohu.com"]).any
\ No newline at end of file
| Uninstall
Using the current install command to update the version of ghdata conflicts with previously installed versions on an OS level and causes errors whenever you try to start ghdata afterwords. If you add an uninstall script or post the command for it in the README, it would make it a lot easier to stay current.
Thanks,
Spencer Robinson
| 2017-05-11T14:41:44Z | [] | [] |
|
chaoss/augur | 67 | chaoss__augur-67 | [
"56"
] | ccdcba9bae81d4b4256b928e9c4879aca811578b | diff --git a/docs/python/conf.py b/docs/python/source/conf.py
similarity index 89%
rename from docs/python/conf.py
rename to docs/python/source/conf.py
--- a/docs/python/conf.py
+++ b/docs/python/source/conf.py
@@ -1,7 +1,8 @@
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# GHData documentation build configuration file, created by
-# sphinx-quickstart on Mon Feb 20 10:26:03 2017.
+# sphinx-quickstart on Tue Oct 24 12:27:08 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
@@ -16,9 +17,9 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
@@ -35,7 +36,8 @@
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
- 'sphinx.ext.imgmath',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
@@ -52,18 +54,18 @@
master_doc = 'index'
# General information about the project.
-project = u'GHData'
-copyright = u'2017, OSSHealth Team'
-author = u'OSSHealth Team'
+project = 'GHData'
+copyright = '2017, GHData Contributors'
+author = 'GHData Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = u''
+version = ''
# The full version, including alpha/beta/rc tags.
-release = u''
+release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -75,7 +77,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
@@ -84,6 +86,8 @@
todo_include_todos = True
+
+
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
@@ -133,8 +137,8 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'GHData.tex', u'GHData Documentation',
- u'OSSHealth Team', 'manual'),
+ (master_doc, 'GHData.tex', 'GHData Documentation',
+ 'GHData Contributors', 'manual'),
]
@@ -143,7 +147,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'ghdata', u'GHData Documentation',
+ (master_doc, 'ghdata', 'GHData Documentation',
[author], 1)
]
@@ -154,7 +158,7 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'GHData', u'GHData Documentation',
+ (master_doc, 'GHData', 'GHData Documentation',
author, 'GHData', 'One line description of project.',
'Miscellaneous'),
]
diff --git a/ghdata/__init__.py b/ghdata/__init__.py
--- a/ghdata/__init__.py
+++ b/ghdata/__init__.py
@@ -1,3 +1,7 @@
from .ghtorrent import GHTorrent
from .publicwww import PublicWWW
-from .githubapi import GitHubAPI
\ No newline at end of file
+from .githubapi import GitHubAPI
+from .librariesio import LibrariesIO
+from .util import makeRelative
+from .downloads import Downloads
+from .localcsv import LocalCSV
\ No newline at end of file
diff --git a/deploy.py b/ghdata/deploy.py
similarity index 100%
rename from deploy.py
rename to ghdata/deploy.py
diff --git a/ghdata/downloads.py b/ghdata/downloads.py
new file mode 100644
--- /dev/null
+++ b/ghdata/downloads.py
@@ -0,0 +1,68 @@
+import json
+import pandas as pd
+import requests
+import datetime
+import base64
+
+class Downloads(object):
+ """Class for retrieveing download information using APIs and web scrapers"""
+ def __init__(self, githubapi):
+ self.__githubapi = githubapi._GitHubAPI__api
+
+
+ def downloads(self, owner, repo):
+ """
+ Detects package file and calls correct function for download statistics
+
+ :param owner: repo owner username
+ :param repo: repo name
+ """
+ root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/")
+
+ for file in root_dir:
+ if file.name == "Gemfile":
+ return self.ruby_downloads(repo)
+ if file.name == "package.json":
+ contents = base64.b64decode(file.content)
+ contents = contents.decode('utf-8')
+ return self.npm_downloads(repo, contents)
+
+ def ruby_downloads(self, repo):
+ """
+ Returns daily downloads for ruby gems from bestgems.org API
+
+ :param repo: repo name
+ """
+ r = requests.get("http://bestgems.org/api/v1/gems/%s/daily_downloads.json" % (repo))
+ raw = r.text
+ df = pd.DataFrame(json.loads(json.loads(json.dumps(raw))))
+
+ columnsTitles=["date","daily_downloads"]
+ df = df.reindex(columns= columnsTitles)
+ df.rename(columns= {"daily_downloads" : "downloads"}, inplace=True)
+
+ return df
+
+ def npm_downloads(self, repo, contents):
+ """
+ Returns daily downloads for ruby gems from bestgems.org API
+
+ :param repo: repo name
+ :param contents: contents of package.json
+ """
+ contents = json.loads(json.loads(json.dumps(contents)))
+ name = contents["name"]
+ dates = []
+ r = requests.get("https://api.npmjs.org/downloads/range/0:%s/%s" % (datetime.datetime.today().strftime('%Y-%m-%d'), name))
+ raw = r.text
+ raw = json.loads(json.loads(json.dumps(raw)))
+ df = pd.DataFrame(raw["downloads"])
+ df.rename(columns= {"day" : "date"}, inplace=True)
+
+ for i, row in df.iterrows():
+ if row["downloads"] != 0:
+ break
+ else:
+ df.drop(i, inplace=True)
+
+ return df
\ No newline at end of file
diff --git a/ghdata/ghtorrent.py b/ghdata/ghtorrent.py
--- a/ghdata/ghtorrent.py
+++ b/ghdata/ghtorrent.py
@@ -1,6 +1,7 @@
#SPDX-License-Identifier: MIT
import pandas as pd
import sqlalchemy as s
+import numpy as np
import re
class GHTorrent(object):
@@ -19,22 +20,55 @@ def __init__(self, dbstr):
except Exception as e:
print("Could not connect to database.\nError: " + str(e))
- def __single_table_count_by_date(self, table, repo_col='project_id'):
+ def __single_table_count_by_date(self, table, repo_col='project_id', user_col='author_id', group_by="week"):
"""
Generates query string to count occurances of rows per date for a given table.
External input must never be sent to this function, it is for internal use only.
:param table: The table in GHTorrent to generate the string for
:param repo_col: The column in that table with the project ids
+ :param user_col: The column in that table with the user ids
+ :param group_by: Default week; Options raw, day, week, month, year; Selects period of time to be grouped by
:return: Query string
"""
- return """
- SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
- FROM {0}
- WHERE {1} = :repoid
- GROUP BY WEEK(created_at)""".format(table, repo_col)
-
- def repoid(self, owner, repo):
+ if group_by == "raw":
+ return """
+ SELECT date(created_at) AS "date", {2} AS "user_id"
+ FROM {0}
+ WHERE {1} = :repoid
+ """.format(table, repo_col, user_col)
+
+ if group_by == "day":
+ return """
+ SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
+ FROM {0}
+ WHERE {1} = :repoid
+ GROUP BY DATE(created_at)""".format(table, repo_col)
+
+ if group_by == "week":
+ return """
+ SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
+ FROM {0}
+ WHERE {1} = :repoid
+ GROUP BY YEARWEEK(created_at)""".format(table, repo_col)
+
+ if group_by == "month":
+ return """
+ SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
+ FROM {0}
+ WHERE {1} = :repoid
+ GROUP BY MONTH(created_at), YEAR(created_at)""".format(table, repo_col)
+
+ if group_by == "year":
+ return """
+ SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
+ FROM {0}
+ WHERE {1} = :repoid
+ GROUP BY YEAR(created_at)""".format(table, repo_col)
+
+
+
+ def repoid(self, owner_or_repoid, repo=None):
"""
Returns a repository's ID as it appears in the GHTorrent projects table
github.com/[owner]/[project]
@@ -43,11 +77,14 @@ def repoid(self, owner, repo):
:param repo: The name of the repository
:return: The repository's ID as it appears in the GHTorrent projects table
"""
- reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :owner')
repoid = 0
- result = self.db.execute(reposql, repo=repo, owner=owner)
- for row in result:
- repoid = row[0]
+ if repo is None:
+ repoid = owner_or_repoid
+ else:
+ reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner')
+ result = self.db.execute(reposql, repo=repo, repoowner=owner_or_repoid)
+ for row in result:
+ repoid = row[0]
return repoid
def userid(self, username):
@@ -66,53 +103,65 @@ def userid(self, username):
# Basic timeseries queries
- def stargazers(self, repoid, start=None, end=None):
+ def stargazers(self, owner, repo=None, group_by="week"):
"""
Timeseries of when people starred a repo
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with stargazers/day
"""
- stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id'))
- return pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)})
+ repoid = self.repoid(owner, repo)
+ stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by))
+ df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)})
+ df.drop(df.index[:1], inplace=True)
+ return df
- def commits(self, repoid):
+ def commits(self, owner, repo=None, group_by="week"):
"""
Timeseries of all the commits on a repo
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with commits/day
"""
- commitsSQL = s.sql.text(self.__single_table_count_by_date('commits'))
+ repoid = self.repoid(owner, repo)
+ commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by))
return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)})
- def forks(self, repoid):
+ def forks(self, owner, repo=None, group_by="week"):
"""
Timeseries of when a repo's forks were created
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with forks/day
"""
- forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from'))
+ repoid = self.repoid(owner, repo)
+ forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by))
return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0)
- def issues(self, repoid):
+ def issues(self, owner, repo=None, group_by="week"):
"""
Timeseries of when people starred a repo
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with issues/day
"""
- issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id'))
+ repoid = self.repoid(owner, repo)
+ issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by))
return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
- def issues_with_close(self, repoid):
+ def issues_with_close(self, owner, repo=None):
"""
How long on average each week it takes to close an issue
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with issues/day
"""
+ repoid = self.repoid(owner, repo)
issuesSQL = s.sql.text("""
SELECT issues.id as "id",
issues.created_at as "date",
@@ -127,13 +176,15 @@ def issues_with_close(self, repoid):
WHERE issues.repo_id = :repoid""")
return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
- def pulls(self, repoid):
+ def pulls(self, owner, repo=None):
"""
Timeseries of pull requests creation, also gives their associated activity
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with pull requests by day
"""
+ repoid = self.repoid(owner, repo)
pullsSQL = s.sql.text("""
SELECT date(pull_request_history.created_at) AS "date",
(COUNT(pull_requests.id)) AS "pull_requests",
@@ -148,13 +199,15 @@ def pulls(self, repoid):
""")
return pd.read_sql(pullsSQL, self.db, params={"repoid": str(repoid)})
- def contributors(self, repoid):
+ def contributors(self, owner, repo=None):
"""
All the contributors to a project and the counts of their contributions
- :param repoid: The id of the project in the projects table. Use repoid() to get this.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with users id, users login, and their contributions by type
"""
+ repoid = self.repoid(owner, repo)
contributorsSQL = s.sql.text("""
SELECT * FROM
@@ -204,14 +257,16 @@ def contributors(self, repoid):
return pd.read_sql(contributorsSQL, self.db, index_col=['user_id'], params={"repoid": str(repoid)})
- def contributions(self, repoid, userid=None):
+ def contributions(self, owner, repo=None, userid=None):
"""
Timeseries of all the contributions to a project, optionally limited to a specific user
- :param repoid: The id of the project in the projects table.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:param userid: The id of user if you want to limit the contributions to a specific user.
:return: DataFrame with all of the contributions seperated by day.
"""
+ repoid = self.repoid(owner, repo)
rawContributionsSQL = """
SELECT DATE(coms.created_at) as "date",
coms.count as "commits",
@@ -239,6 +294,7 @@ def contributions(self, repoid, userid=None):
LEFT JOIN (SELECT issue_comments.created_at AS created_at, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid[[ AND issue_comments.user_id = :userid]] GROUP BY DATE(issue_comments.created_at)) AS isscoms
ON DATE(isscoms.created_at) = DATE(coms.created_at)
+ GROUP BY YEARWEEK(coms.created_at)
ORDER BY DATE(coms.created_at)
"""
@@ -252,15 +308,17 @@ def contributions(self, repoid, userid=None):
parameterized = s.sql.text(rawContributionsSQL)
return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid)})
- def committer_locations(self, repoid):
+ def committer_locations(self, owner, repo=None):
"""
Return committers and their locations
@todo: Group by country code instead of users, needs the new schema
- :param repoid: The id of the project in the projects table.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table.
+ :param repo: The name of the repo.
:return: DataFrame with users and locations sorted by commtis
"""
+ repoid = self.repoid(owner, repo)
rawContributionsSQL = s.sql.text("""
SELECT users.login, users.location, COUNT(*) AS "commits"
FROM commits
@@ -269,21 +327,22 @@ def committer_locations(self, repoid):
JOIN users
ON users.id = commits.author_id
WHERE project_commits.project_id = :repoid
- AND LENGTH(users.location) > 1
GROUP BY users.id
ORDER BY commits DESC
""")
return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)})
- def issue_response_time(self, repoid):
+ def issue_response_time(self, owner, repo=None):
"""
How long it takes for issues to be responded to by people who have commits associate with the project
- :param repoid: The id of the project in the projects table.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with the issues' id the date it was
opened, and the date it was first responded to
"""
+ repoid = self.repoid(owner, repo)
issuesSQL = s.sql.text("""
SELECT issues.created_at AS "created_at",
MIN(issue_comments.created_at) AS "responded_at"
@@ -299,18 +358,24 @@ def issue_response_time(self, repoid):
AND issues.repo_id = :repoid
GROUP BY issues.id
""")
- return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
+ df = pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
+ df['created_at'] = pd.to_datetime(df['created_at'])
+ df['responded_at'] = pd.to_datetime(df['responded_at'])
+ df['hours_between'] = np.floor((df['responded_at'] - df['created_at']) / np.timedelta64(1, 'h'))
+ df = df['hours_between'].value_counts().sort_index().reset_index().rename(columns={'index': 'hours_between', 'hours_between': 'count'})
+ df = df[df['hours_between'] < 48]
+ return df
- def pull_acceptance_rate(self, repoid):
+ def pull_acceptance_rate(self, owner, repo=None):
"""
Timeseries of pull request acceptance rate (Number of pull requests merged on a date over Number of pull requests opened on a date)
- :param repoid: The id of the project in the projects table.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with the pull acceptance rate and the dates
"""
-
+ repoid = self.repoid(owner, repo)
pullAcceptanceSQL = s.sql.text("""
-
SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"
FROM
(SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on
@@ -330,7 +395,7 @@ def pull_acceptance_rate(self, repoid):
return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)})
- def classify_contributors(self, repoid):
+ def classify_contributors(self, owner, repo=None):
"""
Classify everyone who has interacted with a repo into
- user
@@ -340,10 +405,12 @@ def classify_contributors(self, repoid):
- major_contributor
- maintainer
- :param repoid: The id of the project in the projects table.
+ :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table.
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with the login and role of contributors
"""
- contributors = self.contributors(repoid)
+ repoid = self.repoid(owner, repo)
+ contributors = self.contributors(repoid, repo=None)
sums = contributors.sum()
def classify(row):
@@ -364,3 +431,61 @@ def classify(row):
roles = contributors.apply(classify, axis=1)
return roles
+
+ def community_age(self, owner, repo=None):
+ """
+ Information helpful to determining a community's age
+
+ For now, returns the date of the first of each type of action (fork, pull request, etc.)
+ """
+
+ repoid = self.repoid(owner, repo)
+ communityAgeSQL = s.sql.text("""
+ SELECT DATE(proj.created_at) AS "project",
+ DATE(commits.created_at) AS "commit",
+ DATE(frk.created_at) AS "fork",
+ DATE(iss.created_at) AS "issue",
+ DATE(pr.created_at) AS "pull_request"
+
+ FROM commits
+
+ LEFT JOIN (SELECT forked_from_id AS "repo_id", created_at AS "created_at" FROM forks WHERE forks.forked_from_id = :repoid ORDER BY created_at DESC LIMIT 1) AS frk
+ ON frk.repo_id = commits.project_id
+
+ LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss
+ ON iss.repo_id = commits.project_id
+
+ LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr
+ ON pr.repo_id = commits.project_id
+
+ LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj
+ ON proj.repo_id = commits.project_id
+
+ WHERE commits.project_id = :repoid
+ ORDER BY commits.created_at DESC
+ LIMIT 1
+ """)
+
+ return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)})
+
+ def unique_committers(self, owner, repo=None):
+ repoid = self.repoid(owner, repo)
+ uniqueCommittersSQL = s.sql.text("""
+ SELECT unique_committers.created_at AS "date", MAX(@number_of_committers:=@number_of_committers+1) total_unique_committers
+ FROM (
+ SELECT author_id, MIN(DATE(created_at)) created_at
+ FROM commits
+ WHERE project_id = :repoid
+ GROUP BY author_id
+ ORDER BY created_at ASC) AS unique_committers,
+ (SELECT @number_of_committers:= 0) AS number_of_committers
+ GROUP BY DATE(unique_committers.created_at)
+ """)
+ return pd.read_sql(uniqueCommittersSQL, self.db, params={"repoid": str(repoid)})
+
+ def ghtorrent_range(self):
+ ghtorrentRangeSQL = s.sql.text("""
+ SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date"
+ FROM commits
+ """)
+ return pd.read_sql(ghtorrentRangeSQL, self.db)
diff --git a/ghdata/githubapi.py b/ghdata/githubapi.py
--- a/ghdata/githubapi.py
+++ b/ghdata/githubapi.py
@@ -1,9 +1,10 @@
-
-import datetime
+from .localcsv import LocalCSV
+import json
+import re
from dateutil.parser import parse
import pandas as pd
import github
-
+import requests
class GitHubAPI(object):
"""
@@ -15,78 +16,21 @@ def __init__(self, api_key):
:param api_key: GitHub API key
"""
- self.GITUB_API_KEY = api_key
+ self.GITHUB_API_KEY = api_key
self.__api = github.Github(api_key)
- def contributions_by_file(self, owner, repo, filename=None, start=None, end=None, ascending=False):
+ def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50):
"""
- Gets number of addtions and deletions in each file by user
-
- Currently ignores changes from local users unattributed to Github users
+ Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold
:param owner: repo owner username
:param repo: repo name
:param filename: optional; file or directory for function to run on
:param start: optional; start time for analysis
:param end: optional; end time for analysis
- :param ascending: Default False; returns dataframe in ascending order
- """
- if start != None:
- start = parse(start)
- else:
- start = github.GithubObject.NotSet
-
- if end != None:
- end = parse(end)
- else:
- end = github.GithubObject.NotSet
-
- commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end)
-
- if filename != None:
- self.__api.get_repo((owner + "/" + repo)).get_contents(filename)
-
- df = []
-
- for commit in commits:
- for file in commit.files:
- if filename != None:
- try:
- if file.changes != 0 and file.filename == filename:
- df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes})
- except AttributeError:
- pass
- else:
- try:
- if file.changes != 0:
- df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes})
- except AttributeError:
- pass
-
- df = pd.DataFrame(df)
-
- df = df.groupby(["file", "user"]).sum()
-
- df = df.sort_values(ascending=ascending)
-
- return df
-
- def contributions_by_percentage(self, owner, repo, filename=None, start=None, end=None, ascending=False):
+ :param threshold: Default 50;
"""
- Calculates percentage of commits in repo by user
-
- Puts it in dataframe with columns:
- user percentage of commits
- Currently ignores changes from local users unattributed to Github user
-
- :param owner: repo owner username
- :param repo: repo name
- :param filename: optional; file or directory for function to run on
- :param start: optional; start time for analysis
- :param end: optional; end time for analysis
- :param ascending: Default False; returns dataframe in ascending order
- """
if start != None:
start = parse(start)
else:
@@ -109,43 +53,159 @@ def contributions_by_percentage(self, owner, repo, filename=None, start=None, en
for file in commit.files:
if file.filename == filename:
try:
- df.append({'user': commit.author.login})
+ df.append({'userid': commit.author.id})
except AttributeError:
pass
break
else:
for commit in commits:
try:
- df.append({'user': commit.author.login})
+ df.append({'userid': commit.author.id})
except AttributeError:
pass
df = pd.DataFrame(df)
- df = df.groupby(['user']).user.count() / df.groupby(['user']).user.count().sum() * 100
+ df = df.groupby(['userid']).userid.count() / df.groupby(['userid']).userid.count().sum() * 100
+
+ i = 0
+ for num in df.cumsum():
+ i = i + 1
+ if num >= threshold:
+ worst = i
+ break
+
+ i = 0
+ for num in df.sort_values(ascending=True).cumsum():
+ i = i + 1
+ if num >= threshold:
+ best = i
+ break
- df = df.sort_values(ascending=ascending)
+ bus_factor = [{'worst': worst, 'best' : best}]
- return df
+ return pd.DataFrame(bus_factor)
- def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50, best=False):
+ def tags(self, owner, repo, raw=False):
"""
- Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold
+ Returns dates and names of tags
:param owner: repo owner username
:param repo: repo name
- :param filename: optional; file or directory for function to run on
- :param start: optional; start time for analysis
- :param end: optional; end time for analysis
- :param threshold: Default 50;
- :param best: Default False; If true, sums from lowest to highestn
+ :param raw: Default False; Returns list of dicts
"""
- df = self.contributions_by_percentage(owner, repo, filename, start, end, best)
+ cursor = "null"
+ tags_list = []
+ url = "https://api.github.com/graphql"
+
+ while True:
+ query = {"query" :
+ """
+ query {
+ repository(owner: "%s", name: "%s") {
+ tags: refs(refPrefix: "refs/tags/", first: 100, after: "%s") {
+ edges {
+ cursor
+ tag: node {
+ name
+ target {
+ ... on Tag {
+ tagger {
+ date
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ """ % (owner, repo, cursor)
+ }
+ r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query)
+ raw = r.text
+ data = json.loads(json.loads(json.dumps(raw)))
+ tags = data['data']['repository']['tags']['edges']
+ for i in tags:
+ try:
+ tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']})
+ except KeyError:
+ pass
+ if data['data']['repository']['tags']['edges'] == []:
+ break
+ else:
+ cursor = data['data']['repository']['tags']['edges'][-1]['cursor']
+ return pd.DataFrame(tags_list)
+
+ def major_tags(self, owner, repo):
+ """
+ Returns dates and names of major version (according to semver) tags. May return blank if no major versions
+ :param owner: repo owner username
+ :param repo: repo name
+ """
+ cursor = "null"
+ tags_list = []
+ url = "https://api.github.com/graphql"
+
+ while True:
+ query = {"query" :
+ """
+ query {
+ repository(owner: "%s", name: "%s") {
+ tags: refs(refPrefix: "refs/tags/", first: 100, after: "%s") {
+ edges {
+ cursor
+ tag: node {
+ name
+ target {
+ ... on Tag {
+ tagger {
+ date
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ """ % (owner, repo, cursor)
+ }
+ r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query)
+ raw = r.text
+ data = json.loads(json.loads(json.dumps(raw)))
+ tags = data['data']['repository']['tags']['edges']
+ for i in tags:
+ try:
+ tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']})
+ except KeyError:
+ pass
+ if data['data']['repository']['tags']['edges'] == []:
+ break
+ else:
+ cursor = data['data']['repository']['tags']['edges'][-1]['cursor']
+
+ major_versions = []
+ pattern = re.compile("[0-9]+\.[0]+\.[0]+$")
+ for i in tags_list:
+ try:
+ if re.search(pattern, i["release"]) != None:
+ major_versions.append(i)
+ except AttributeError:
+ pass
+
+ return pd.DataFrame(major_versions)
+
+
+ def contributors_gender(self, owner, repo=None):
+ contributors = self.__api.get_repo((owner + "/" + repo)).get_contributors()
+ names = pd.DataFrame(columns=['name'])
i = 0
- for num in df.cumsum():
- i = i + 1
- if num >= threshold:
- bus_factor = pd.Series(i, index=["Bus Factor"])
- return bus_factor
+ for contributor in contributors:
+ if contributor.name is not None:
+ names.loc[i] = [contributor.name.split()[0]]
+ i += 1
+ genderized = names.merge(LocalCSV.name_gender, how='inner', on=['name'])
+ return genderized
\ No newline at end of file
diff --git a/ghdata/librariesio.py b/ghdata/librariesio.py
new file mode 100644
--- /dev/null
+++ b/ghdata/librariesio.py
@@ -0,0 +1,103 @@
+import requests
+import pandas as pd
+import numpy as np
+from bs4 import BeautifulSoup
+
+class LibrariesIO(object):
+ """Handles interaction with https://libraries.io/api to get dependency data"""
+ def __init__(self, api_key, githubapi):
+ self.API_KEY = api_key
+ self.__githubapi = githubapi._GitHubAPI__api
+
+
+ def dependencies(self, owner, repo):
+ """
+ Finds the packages that a project depends on
+
+ :param owner: GitHub username of the owner of the repo
+ :param repo: Repository name
+ :return: Dict that contains the results (https://libraries.io/api#repository-dependencies)
+ """
+ url = "https://libraries.io/api/github/{owner}/{repo}/dependencies".format(owner=owner, repo=repo)
+ r = requests.get(url, params={"api_key": self.API_KEY})
+ return r.json()
+
+ def dependents(self, owner, repo):
+ """
+        Finds the packages that depend on this repository
+
+ :param owner: GitHub username of the owner of the repo
+ :param repo: Repository name
+ :return: Dict that contains the results (https://libraries.io/api#project-dependents)
+ """
+ projectsUrl = "https://libraries.io/api/github/{owner}/{repo}/projects".format(owner=owner, repo=repo)
+ projectsRequest = requests.get(projectsUrl, params={"api_key": self.API_KEY})
+ json = projectsRequest.json()
+
+ if projectsRequest.status_code == 400:
+ print('You need to set the LibrariesIO API key in ghdata.cfg or the environment variable GHDATA_LIBRARIESIO_API_KEY')
+
+ if projectsRequest.status_code != 200:
+ return projectsRequest.json()
+ else:
+ project = projectsRequest.json()[0]['name']
+ platform = projectsRequest.json()[0]['platform']
+ dependentsUrl = "https://libraries.io/api/{platform}/{repo}/dependents".format(platform=platform, repo=repo)
+ dependentsRequest = requests.get(dependentsUrl, params={"api_key": self.API_KEY})
+ return dependentsRequest
+
+ def dependency_stats(self, owner, repo):
+ """
+        Finds the number of dependencies, dependent projects, and dependent repos by scraping it off of the libraries.io website
+
+ :param owner: GitHub username of the owner of the repo
+ :param repo: Repository name
+ :return: Dict that contains the results
+ """
+ root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/")
+
+ platform = None
+
+ for file in root_dir:
+ if file.name == "Gemfile":
+ platform = 'rubygems'
+ if file.name == "package.json":
+ platform = 'npm'
+ if file.name == 'setup.py':
+ platform = 'pypi'
+
+ if platform == None:
+ return {'Stats' : 'null'}
+
+ url = "https://libraries.io/{platform}/{repo}/".format(platform=platform, repo=repo)
+
+ resp = requests.get(url)
+
+ if resp.status_code == 404:
+ return {'Stats' : 'null'}
+
+ soup = BeautifulSoup(resp.text, "html.parser")
+
+ infotable = soup.body.div.next_sibling.next_sibling.div.div.next_sibling.next_sibling.dl.next_sibling.next_sibling.next_sibling.next_sibling
+
+ data =[]
+ for child in infotable.children:
+ if child.string == '\n':
+ pass
+ if child.string == None:
+ if child.a != None:
+ data.append(child.a.string)
+ else:
+ data.append(child.string)
+
+ data_new = []
+ for item in data:
+ data_new.append(item.strip('\n'))
+ data_new = list(filter(None, data_new))
+
+ data_new = dict(zip(*[iter(data_new)]*2))
+
+ final_data = {'dependencies' : data_new['Dependencies'], 'dependent_projects' : data_new['Dependent projects'], 'dependent_repositories' : data_new['Dependent repositories']}
+
+ return final_data
+
diff --git a/ghdata/localcsv.py b/ghdata/localcsv.py
new file mode 100644
--- /dev/null
+++ b/ghdata/localcsv.py
@@ -0,0 +1,10 @@
+#SPDX-License-Identifier: MIT
+import pandas as pd
+from .util import get_data_path
+
+class LocalCSV(object):
+
+ def __init__(self):
+ return
+
+ name_gender = pd.read_csv(get_data_path('name_gender.csv'), index_col=0)
\ No newline at end of file
diff --git a/ghdata/server.py b/ghdata/server.py
old mode 100755
new mode 100644
--- a/ghdata/server.py
+++ b/ghdata/server.py
@@ -1,477 +1,723 @@
#SPDX-License-Identifier: MIT
-import ghdata
-
import os
import sys
+import ipdb
+import traceback
if (sys.version_info > (3, 0)):
import configparser as configparser
else:
import ConfigParser as configparser
-from flask import Flask, request, Response, send_from_directory
+sys.path.append('..')
+
+import ghdata
+from flask import Flask, request, Response
from flask_cors import CORS
+import json
-GHDATA_API_VERSION = 'unstable'
+GHDATA_API_VERSION = 'api/unstable'
+# Location to load configuration from
+GHDATA_CONFIG_FILE = open(os.getenv('GHDATA_CONFIG_FILE', 'ghdata.cfg'), 'r+')
+# Options to export the loaded configuration as environment variables for Docker
+GHDATA_ENV_EXPORT = os.getenv('GHDATA_ENV_EXPORT', '0') == '1'
+if GHDATA_ENV_EXPORT:
+ GHDATA_ENV_EXPORT_FILE = open(os.getenv('GHDATA_ENV_EXPORT_FILE', 'lastrun.cfg.sh'), 'w+')
+
+
+def serialize(data, orient='records'):
+
+ if (orient is None):
+ orient = 'records'
+
+ result = ''
-def serialize(func, **args):
- """
- Serailizes a function that returns a dataframe
- """
- data = func(**args)
if hasattr(data, 'to_json'):
- return data.to_json(orient='records', date_format='iso', date_unit='ms')
+ result = data.to_json(orient=orient, date_format='iso', date_unit='ms')
else:
- return data
+ try:
+ result = json.dumps(data)
+ except:
+ result = data
+ return result
-def flaskify_ghtorrent(ghtorrent, func):
+def flaskify(func):
"""
Simplifies API endpoints that just accept owner and repo,
serializes them and spits them out
"""
- def generated_function(owner, repo):
- repoid = ghtorrent.repoid(owner=owner, repo=repo)
- return Response(response=serialize(func, repoid=repoid),
+ def generated_function(*args, **kwargs):
+ kwargs.update(request.args.to_dict())
+ df = func(*args, **kwargs)
+ return Response(response=serialize(df, orient=request.args.get('orient')),
status=200,
mimetype="application/json")
generated_function.__name__ = func.__name__
return generated_function
-def flaskify(func):
+def addMetric(app, function, endpoint):
+ """Simplifies adding routes that only accept owner/repo"""
+ app.route('/{}/<owner>/<repo>/{}'.format(GHDATA_API_VERSION, endpoint))(flaskify(function))
+
+def addTimeseries(app, function, endpoint):
"""
- Simplifies API endpoints that just accept owner and repo,
- serializes them and spits them out
+ Simplifies adding routes that accept owner/repo and return timeseries
+
+ :param app: Flask app
+ :param function: Function from a datasource to add
+ :param endpoint: GET endpoint to generate
"""
- def generated_function(owner, repo):
- return Response(response=serialize(func, owner=owner, repo=repo),
- status=200,
- mimetype="application/json")
- generated_function.__name__ = func.__name__
- return generated_function
+ addMetric(app, function, 'timeseries/{}'.format(endpoint))
+ app.route('/{}/<owner>/<repo>/timeseries/{}/relative_to/<ownerRelativeTo>/<repoRelativeTo>'.format(GHDATA_API_VERSION, endpoint))(flaskify(ghdata.util.makeRelative(function)))
+
-def read_config(parser, section, name, environment_variable, default):
+app = Flask(__name__)
+CORS(app)# Try to open the config file and parse it
+parser = configparser.RawConfigParser()
+parser.readfp(GHDATA_CONFIG_FILE)
+
+if GHDATA_ENV_EXPORT:
+ GHDATA_ENV_EXPORT_FILE.write('#!/bin/bash\n')
+
+def read_config(section, name, environment_variable, default):
+ value = default
try:
value = os.getenv(environment_variable, parser.get(section, name))
- return value
- except:
+ except Exception as e:
if not parser.has_section(section):
parser.add_section(section)
+ print('[' + section + '] -> ' + name + ' is missing. Adding to config...')
parser.set(section, name, default)
- with open('ghdata.cfg', 'w') as configfile:
- parser.write(configfile)
- return default
-
+ parser.write(GHDATA_CONFIG_FILE)
+ value = default
+ if GHDATA_ENV_EXPORT:
+ GHDATA_ENV_EXPORT_FILE.write('export ' + environment_variable + '="' + value + '"\n')
+ return value
+
+host = read_config('Server', 'host', 'GHDATA_HOST', '0.0.0.0')
+port = read_config('Server', 'port', 'GHDATA_PORT', '5000')
+
+publicwww = ghdata.PublicWWW(api_key=read_config('PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None'))
+github = ghdata.GitHubAPI(api_key=read_config('GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None'))
+librariesio = ghdata.LibrariesIO(api_key=read_config('LibrariesIO', 'APIKey', 'GHDATA_LIBRARIESIO_API_KEY', 'None'), githubapi=github)
+downloads = ghdata.Downloads(github)
+localcsv = ghdata.LocalCSV()
+
+if (read_config('Development', 'developer', 'GHDATA_DEBUG', '0') == '1'):
+ debugmode = True
+else:
+ debugmode = False
+
+dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
+ read_config('Database', 'user', 'GHDATA_DB_USER', 'root'),
+ read_config('Database', 'pass', 'GHDATA_DB_PASS', 'password'),
+ read_config('Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'),
+ read_config('Database', 'port', 'GHDATA_DB_PORT', '3306'),
+ read_config('Database', 'name', 'GHDATA_DB_NAME', 'msr14')
+)
+ghtorrent = ghdata.GHTorrent(dbstr=dbstr)
+
+"""
+@api {get} / API Status
+@apiName Status
+@apiGroup Misc
+"""
[email protected]('/{}/'.format(GHDATA_API_VERSION))
+def api_root():
+ """API status"""
+ # @todo: When we support multiple data sources this should keep track of their status
+ # @todo: Add GHTorrent test to determine status
+ ghtorrent_status = "good"
+ # @todo: Add GitHub API status
+ # @todo: Add PublicWWW API status
+ return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status)
+
+#######################
+# Timeseries #
+#######################
+
+# @todo: Link to LF Metrics
+
+"""
+@api {get} /:owner/:repo/commits/group_by=:group_by Commits
+@apiName Commits
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+@apiParam {String} group_by (Default to Week) Allows for results to be grouped by day, week, month, or year
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "commits": 153
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "commits": 192
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.commits, 'commits')
+
+"""
+@api {get} /:owner/:repo/forks/group_by=:group_by Forks
+@apiName Forks
+@apiGroup Timeseries
+@apiParam {String} group_by (Default to Week) Allows for results to be grouped by day, week, month, or year
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "forks": 13
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "forks": 12
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.forks, 'forks')
+
+"""
+@api {get} /:owner/:repo/issues/group_by=:group_by Issues
+@apiName Issues
+@apiGroup Timeseries
+@apiParam {String} group_by (Default to Week) Allows for results to be grouped by day, week, month, or year
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "issues":13
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "issues":15
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.issues, 'issues')
+
+"""
+@api {get} /:owner/:repo/issues/response_time Issue Response Time
+@apiName IssueResponseTime
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "created_at": "2013-09-16T17:00:54.000Z",
+ "responded_at": "2013-09-16T17:20:58.000Z"
+ },
+ {
+ "created_at": "2013-09-16T09:31:34.000Z",
+ "responded_at": "2013-09-16T09:43:03.000Z"
+ }
+ ]
+"""
+addMetric(app, ghtorrent.issue_response_time, 'issues/response_time')
+
+"""
+@api {get} /:owner/:repo/pulls Pull Requests by Week
+@apiName PullRequestsByWeek
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "pull_requests": 1
+ "comments": 11
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "pull_requests": 2
+ "comments": 31
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.pulls, 'pulls')
+
+"""
+@api {get} /:owner/:repo/stargazers/group_by=:group_by Stargazers
+@apiName Stargazers
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+@apiParam {String} group_by (Default to Week) Allows for results to be grouped by day, week, month, or year
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "watchers": 133
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "watchers": 54
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.stargazers, 'stargazers')
+
+"""
+@api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week
+@apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week)
+@apiName PullRequestAcceptanceRate
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "rate": 0.5
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "rate": 0.33
+ }
+ ]
+"""
+addTimeseries(app, ghtorrent.pull_acceptance_rate, 'pulls/acceptance_rate')
+
+"""
+@api {get} /:owner/:repo/timeseries/tags Tags release timeseries
+@apiDescription Timeseries of tags
+@apiName Tags
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+            "release": "0.5"
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+            "release": "0.5.1"
+ }
+ ]
+"""
+addTimeseries(app, github.tags, 'tags')
+
+"""
+@api {get} /:owner/:repo/timeseries/tags/major Tags for major releases timeseries
+@apiDescription Timeseries of Major release tags
+@apiName Major Release Tags
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+            "release": "1.0.0"
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+            "release": "2.0.0"
+ }
+ ]
+"""
+addTimeseries(app, github.major_tags, 'tags/major')
+
+"""
+@api {get} /:owner/:repo/timeseries/downloads Number of downloads
+@apiDescription Timeseries of downloads from package manager
+@apiName Downloads
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+            "downloads": 235
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+            "downloads": 327
+ }
+ ]
+"""
+addTimeseries(app, downloads.downloads, 'downloads')
+
+
+
+# Contribution Trends
+"""
+@api {get} /:owner/:repo/contributors Total Contributions by User
+@apiName TotalContributions
+@apiGroup Users
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "foo",
+ "location": "Springfield",
+ "commits": 1337.0,
+ "pull_requests": 60.0,
+ "issues": null,
+ "commit_comments": 158.0,
+ "pull_request_comments": 718.0,
+ "issue_comments": 1668.0
+ },
+ {
+ "login": "bar",
+ "location": null,
+ "commits": 3968.0,
+ "pull_requests": null,
+ "issues": 12.0,
+ "commit_comments": 158.0,
+ "pull_request_comments": 718.0,
+ "issue_comments": 1568.0
+ }
+ ]
+"""
+addMetric(app, ghtorrent.contributors, 'contributors')
+
+#######################
+# Contribution Trends #
+#######################
+
+"""
+@api {get} /:owner/:repo/contributions Contributions by Week
+@apiName ContributionsByWeek
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+@apiParam (String) user Limit results to the given user's contributions
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "date": "2015-01-01T00:00:00.000Z",
+ "commits": 37.0,
+ "pull_requests": null,
+ "issues": null,
+ "commit_comments": 7.0,
+ "pull_request_comments": 8.0,
+ "issue_comments": 17.0
+ },
+ {
+ "date": "2015-01-08T00:00:00.000Z",
+ "commits": 68.0,
+ "pull_requests": null,
+ "issues": 12.0,
+ "commit_comments": 18.0,
+ "pull_request_comments": 13.0,
+ "issue_comments": 28.0
+ }
+ ]
+"""
[email protected]('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION))
+def contributions(owner, repo):
+ repoid = ghtorrent.repoid(owner=owner, repo=repo)
+ user = request.args.get('user')
+ if (user):
+ userid = ghtorrent.userid(username=user)
+ contribs = ghtorrent.contributions(repoid=repoid, userid=userid)
+ else:
+ contribs = ghtorrent.contributions(repoid=repoid)
+ serialized_contributors = serialize(contribs, orient=request.args.get('orient'))
+ return Response(response=serialized_contributors,
+ status=200,
+ mimetype="application/json")
+
+"""
+@api {get} /:owner/:repo/committer_locations Commits and Location by User
+@apiName CommitterLocations
+@apiGroup Diversity
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "bonnie",
+ "location": "Rowena, TX",
+ "commits": 12
+ },
+ {
+ "login":"clyde",
+ "location":"Ellis County, TX",
+ "commits": 12
+ }
+ ]
+"""
+addMetric(app, ghtorrent.committer_locations, 'committer_locations')
+
+
+
+"""
+@api {get} /:owner/:repo/community_age Timeline of events to determine the age of a community
+@apiName CommunityAge
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "bonnie",
+ "location": "Rowena, TX",
+ "commits": 12
+ },
+ {
+ "login":"clyde",
+ "location":"Ellis County, TX",
+ "commits": 12
+ }
+ ]
+"""
+addMetric(app, ghtorrent.community_age, 'community_age')
+
+"""
+@api {get} /:owner/:repo/dependencies List of dependencies from libraries.io
+@apiName Dependencies
+@apiGroup Ecosystem
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ { "full_name": "rails/rails"
+ "description": "Ruby on Rails",
+ "fork": false, "created_at": "2008-04-11T02:19:47.000Z",
+ "updated_at": "2017-09-20T20:16:47.181Z",
+ "pushed_at": "2017-09-20T19:39:08.000Z",
+ "homepage": "http://rubyonrails.org",
+ "size": 155199, "stargazers_count": 36993,
+ "language": "Ruby", "has_issues": true,
+ "has_wiki": false,
+ "has_pages": false,
+ "forks_count": 15130,
+ "mirror_url": null,
+ "open_issues_count": 1157,
+ "default_branch": "master",
+ "subscribers_count": 2452,
+ "uuid": "8514", "source_name": null,
+ "license": "MIT", "private": false,
+ "contributions_count": 2616,
+ "has_readme": "README.md",
+ "has_changelog": null,
+ "has_contributing": "CONTRIBUTING.md",
+ "has_license": "MIT-LICENSE",
+ "has_coc": "CODE_OF_CONDUCT.md",
+ "has_threat_model": null,
+ "has_audit": null,
+ "status": null,
+ "last_synced_at": "2017-09-20T20:16:47.153Z",
+ "rank": 28, "host_type": "GitHub",
+ "host_domain": null,
+ "name": null,
+ "scm": "git",
+ "fork_policy": null,
+ "github_id": "8514",
+ "pull_requests_enabled": null,
+ "logo_url": null,
+ "github_contributions_count": 2616,
+ "keywords": ["activejob", "activerecord", "html", "mvc", "rails", "ruby"],
+ "dependencies": [
+ { "project_name": "websocket-driver",
+ "name": "websocket-driver",
+ "platform": "rubygems",
+ "requirements": "~> 0.6.1",
+ "latest_stable": "0.7.0",
+ "latest": "0.7.0",
+ "deprecated": false, "outdated": true,
+ "filepath": "actioncable/actioncable.gemspec", "
+ kind": "runtime"
+ }
+ ]
+"""
+addMetric(app, librariesio.dependencies, 'dependencies')
+
+"""
+@api {get} /:owner/:repo/dependents List of dependants from libraries.io
+@apiName Dependents
+@apiGroup Ecosystem
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "login": "bonnie",
+ "location": "Rowena, TX",
+ "commits": 12
+ },
+ {
+ "login":"clyde",
+ "location":"Ellis County, TX",
+ "commits": 12
+ }
+ ]
+"""
+addMetric(app, librariesio.dependents, 'dependents')
+
+"""
+@api {get} /:owner/:repo/dependency_stats List of libraries.io stats
+@apiName DependencyStats
+@apiGroup Ecosystem
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "dependencies": "10",
+ "dependent_projects": "10.6K",
+ "dependent_repositories": "392K"
+ }
+ ]
+"""
+addMetric(app, librariesio.dependency_stats, 'dependency_stats')
+
+
+"""
+@api {get} /:owner/:repo/unique_committers Count of new committers weekly
+@apiName UniqueCommitters
+@apiGroup Timeseries
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ { "date":"2009-02-16T00:00:00.000Z",
+ "total_unique_committers":1.0
+ },
+ { "date":"2009-07-12T00:00:00.000Z",
+ "total_unique_committers":2.0
+ },
+ ]
+"""
+addTimeseries(app, ghtorrent.unique_committers, 'unique_committers')
+
+# Popularity
+"""
+@api {get} /:owner/:repo/linking_websites Linking Websites
+@apiDescription Returns an array of websites and their rank according to http://publicwww.com/
+@apiName LinkingWebsites
+@apiGroup Popularity
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "url": "missouri.edu",
+ "rank": "1"
+ },
+ {
+ "url": "unomaha.edu",
+ "rank": "2"
+ }
+ ]
+"""
+addMetric(app, publicwww.linking_websites, 'linking_websites')
+
+"""
+@api {get} /ghtorrent_range Range of dates covered by GHTorrent
+@apiName GhtorrentRange
+@apiGroup Misc
+"""
[email protected]('/{}/ghtorrent_range'.format(GHDATA_API_VERSION))
+
+def ghtorrent_range():
+ ghtorrent_range = serialize(ghtorrent.ghtorrent_range())
+ return Response(response=ghtorrent_range,
+ status=200,
+ mimetype="application/json")
+
+#######################
+# GitHub API #
+#######################
+
+"""
+@api {get} /:owner/:repo/bus_factor Bus Factor
+@apiDescription Returns an integer that is the number of developers that have a summed percentage of contributions higher than the threshold
+@apiName GitHub
+@apiGroup Users
+
+@apiParam {String} owner Username of the owner of the GitHub repository
+@apiParam {String} repo Name of the GitHub repository
+
+@apiSuccessExample {json} Success-Response:
+ [
+ {
+ "min_date": "2009-02-16T00:00:00.000Z",
+ "max_date": "2017-02-16T00:00:00.000Z"
+ }
+ ]
+"""
+addMetric(app, github.bus_factor, 'bus_factor')
+
+
+
+
+if (debugmode):
+ app.debug = True
+
+if read_config('Development', 'interactive', 'GHDATA_INTERACTIVE', '0') == '1':
+ ipdb.set_trace()
def run():
+ app.run(host=host, port=int(port), debug=debugmode)
- app = Flask(__name__)
- CORS(app)
- # Try to open the config file and parse it
- parser = configparser.RawConfigParser()
- parser.read('ghdata.cfg')
+# Close files
+GHDATA_CONFIG_FILE.close()
+if GHDATA_ENV_EXPORT:
+ GHDATA_ENV_EXPORT_FILE.close()
+if __name__ == "__main__":
try:
- dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
- read_config(parser, 'Database', 'user', 'GHDATA_DB_USER', 'root'),
- read_config(parser, 'Database', 'pass', 'GHDATA_DB_PASS', 'password'),
- read_config(parser, 'Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'),
- read_config(parser, 'Database', 'port', 'GHDATA_DB_PORT', '3306'),
- read_config(parser, 'Database', 'name', 'GHDATA_DB_NAME', 'msr14')
- )
- print("Connecting with " + dbstr)
- ghtorrent = ghdata.GHTorrent(dbstr=dbstr)
+ run()
except Exception as e:
- print("Failed to connect to database (" + str(e) + ")");
-
- host = read_config(parser, 'Server', 'host', 'GHDATA_HOST', '0.0.0.0')
- port = read_config(parser, 'Server', 'port', 'GHDATA_PORT', '5000')
-
- publicwww = ghdata.PublicWWW(api_key=read_config(parser, 'PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None'))
- github = ghdata.GitHubAPI(api_key=read_config(parser, 'GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None'))
-
- if (read_config(parser, 'Development', 'developer', 'GHDATA_DEBUG', '0') == '1'):
- debugmode = True
- else:
- debugmode = False
-
-
-
- """
- @api {get} / API Status
- @apiName Status
- @apiGroup Misc
- """
- @app.route('/{}/'.format(GHDATA_API_VERSION))
- def api_root():
- """API status"""
- # @todo: When we support multiple data sources this should keep track of their status
- # @todo: Add GHTorrent test to determine status
- ghtorrent_status = "good"
- # @todo: Add GitHub API status
- # @todo: Add PublicWWW API status
- return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status)
-
- #######################
- # Timeseries #
- #######################
-
- # @todo: Link to LF Metrics
-
- """
- @api {get} /:owner/:repo/commits Commits by Week
- @apiName CommitsByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "commits": 153
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "commits": 192
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.commits))
-
- """
- @api {get} /:owner/:repo/forks Forks by Week
- @apiName ForksByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "forks": 13
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "forks": 12
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.forks))
-
- """
- @api {get} /:owner/:repo/issues Issues by Week
- @apiName IssuesByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "issues":13
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "issues":15
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.issues))
-
- """
- @api {get} /:owner/:repo/issues/response_time Issue Response Time
- @apiName IssueResponseTime
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "created_at": "2013-09-16T17:00:54.000Z",
- "responded_at": "2013-09-16T17:20:58.000Z"
- },
- {
- "created_at": "2013-09-16T09:31:34.000Z",
- "responded_at": "2013-09-16T09:43:03.000Z"
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.issue_response_time))
-
- """
- @api {get} /:owner/:repo/pulls Pull Requests by Week
- @apiName PullRequestsByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "pull_requests": 1
- "comments": 11
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "pull_requests": 2
- "comments": 31
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.pulls))
-
- """
- @api {get} /:owner/:repo/stargazers Stargazers by Week
- @apiName StargazersByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "watchers": 133
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "watchers": 54
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.stargazers))
-
- """
- @api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week
- @apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week)
- @apiName Stargazers
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "rate": 0.5
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "rate": 0.33
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.pull_acceptance_rate))
-
- # Contribution Trends
- """
- @api {get} /:owner/:repo/contributors Total Contributions by User
- @apiName TotalContributions
- @apiGroup Users
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "login": "foo",
- "location": "Springfield",
- "commits": 1337.0,
- "pull_requests": 60.0,
- "issues": null,
- "commit_comments": 158.0,
- "pull_request_comments": 718.0,
- "issue_comments": 1668.0
- },
- {
- "login": "bar",
- "location": null,
- "commits": 3968.0,
- "pull_requests": null,
- "issues": 12.0,
- "commit_comments": 158.0,
- "pull_request_comments": 718.0,
- "issue_comments": 1568.0
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.contributors))
-
- #######################
- # Contribution Trends #
- #######################
-
- """
- @api {get} /:owner/:repo/contributions Contributions by Week
- @apiName ContributionsByWeek
- @apiGroup Timeseries
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
- @apiParam (String) user Limit results to the given user's contributions
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "date": "2015-01-01T00:00:00.000Z",
- "commits": 37.0,
- "pull_requests": null,
- "issues": null,
- "commit_comments": 7.0,
- "pull_request_comments": 8.0,
- "issue_comments": 17.0
- },
- {
- "date": "2015-01-08T00:00:00.000Z",
- "commits": 68.0,
- "pull_requests": null,
- "issues": 12.0,
- "commit_comments": 18.0,
- "pull_request_comments": 13.0,
- "issue_comments": 28.0
- }
- ]
- """
- @app.route('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION))
- def contributions(owner, repo):
- repoid = ghtorrent.repoid(owner=owner, repo=repo)
- user = request.args.get('user')
- if (user):
- userid = ghtorrent.userid(username=user)
- contribs = ghtorrent.contributions(repoid=repoid, userid=userid)
- else:
- contribs = ghtorrent.contributions(repoid=repoid)
- return Response(response=contribs,
- status=200,
- mimetype="application/json")
-
- # Diversity
-
- """
- @api {get} /:owner/:repo/commits/locations Commits and Location by User
- @apiName Stargazers
- @apiGroup Diversity
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "login": "bonnie",
- "location": "Rowena, TX",
- "commits": 12
- },
- {
- "login":"clyde",
- "location":"Ellis County, TX",
- "commits": 12
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))(
- flaskify_ghtorrent(ghtorrent, ghtorrent.committer_locations))
-
- # Popularity
- """
- @api {get} /:owner/:repo/linking_websites Linking Websites
- @apiDescription Returns an array of websites and their rank according to http://publicwww.com/
- @apiName LinkingWebsites
- @apiGroup Popularity
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "url": "missouri.edu",
- "rank": "1"
- },
- {
- "url": "unomaha.edu",
- "rank": "2"
- }
- ]
- """
- app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(publicwww.linking_websites))
+ print(e)
+ type, value, tb = sys.exc_info()
+ traceback.print_exc()
+ if (debugmode):
+ ipdb.post_mortem(tb)
+ exit(1)
- #######################
- # GitHub API #
- #######################
- """
- @api {get} /:owner/:repo/bus_factor Bus Factor
- @apiDescription Returns an integer that is the number of develpers that have a summed percentage of contributions higher than the threshold
- @apiName GitHub
- @apiGroup Users
-
- @apiParam {String} owner Username of the owner of the GitHub repository
- @apiParam {String} repo Name of the GitHub repository
- @apiParam {String} filename: optional; file or directory for function to run on
- @apiParam {String} start: optional; start time for analysis
- @apiParam {String} end: optional; end time for analysis
- @apiParam {String} threshold: Default 50;
- @apiParam {String} best: Default False; If true, sums from lowest to highest
-
- @apiSuccessExample {json} Success-Response:
- [
- {
- "repo": "ghdata",
- "bus_factor": "2"
- }
- ]
- """
- @app.route('/{}/<owner>/<repo>/bus_factor'.format(GHDATA_API_VERSION))
- def bus_factor(owner,repo):
- kwargs = request.args.to_dict()
- return Response(response=github.bus_factor(owner, repo, **kwargs).to_json(), status=200, mimetype="application/json")
-
-
-
- if (debugmode):
- print(" * Serving static routes")
- # Serve the front-end files in debug mode to make it easier for developers to work on the interface
- # @todo: Figure out why this isn't working.
- @app.route('/')
- def index():
- root_dir = os.path.dirname(os.getcwd())
- print(root_dir + '/ghdata/static')
- return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html')
-
- @app.route('/scripts/<path>')
- def send_scripts(path):
- root_dir = os.path.dirname(os.getcwd())
- return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path)
-
- @app.route('/styles/<path>')
- def send_styles(path):
- root_dir = os.path.dirname(os.getcwd())
- return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path)
-
- app.debug = True
-
- app.run(host=host, port=int(port), debug=debugmode)
-if __name__ == '__main__':
- run()
+
\ No newline at end of file
diff --git a/ghdata/util.py b/ghdata/util.py
new file mode 100644
--- /dev/null
+++ b/ghdata/util.py
@@ -0,0 +1,27 @@
+#SPDX-License-Identifier: MIT
+import pandas as pd
+import os
+
+def makeRelative(function):
+ """
+ Decorator that makes a timeseries relative to another timeseries
+ """
+ def generated_function(owner, repo, ownerRelativeTo, repoRelativeTo):
+ baseData = function(ownerRelativeTo, repoRelativeTo)
+ comparableData = function(owner, repo)
+ columns = list(baseData.columns)
+ columns.remove('date')
+ relativeData = (
+ pd
+ .merge(baseData, comparableData, on='date', how='left')
+ .dropna()
+ )
+ for col in columns:
+ relativeData[col + '_ratio'] = relativeData[col + '_y'] / relativeData[col + '_x']
+ return relativeData
+ generated_function.__name__ = function.__name__ + '_relative'
+ return generated_function
+
+_ROOT = os.path.abspath(os.path.dirname(__file__))
+def get_data_path(path):
+ return os.path.join(_ROOT, 'data', path)
\ No newline at end of file
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
Install ghdata package with pip.
'''
-from setuptools import setup, find_packages
+from setuptools import setup
from codecs import open
from os import path
@@ -16,7 +16,8 @@
setup(
name='ghdata',
- version='0.2.2',
+ version='0.4.0',
+ include_package_data = True,
description='Library/Server for data related to the health and sustainability of GitHub projects',
long_description=long_description,
url='https://github.com/OSSHealth/ghdata',
@@ -39,7 +40,7 @@
'Programming Language :: Python :: 3.5',
],
keywords='ghtorrent github api data science',
- install_requires=['flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'pyevent'],
+ install_requires=['ipdb', 'setuptools-git', 'beautifulsoup4', 'flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'pyevent', 'gunicorn'],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
diff --git a/busFactor/pythonBlameAuthorEmail.py b/unintegrated-python/busFactor/pythonBlameAuthorEmail.py
similarity index 100%
rename from busFactor/pythonBlameAuthorEmail.py
rename to unintegrated-python/busFactor/pythonBlameAuthorEmail.py
diff --git a/busFactor/pythonBlameLinesInRepo.py b/unintegrated-python/busFactor/pythonBlameLinesInRepo.py
similarity index 100%
rename from busFactor/pythonBlameLinesInRepo.py
rename to unintegrated-python/busFactor/pythonBlameLinesInRepo.py
diff --git a/organizationHistory/pythonBlameHistoryTree.py b/unintegrated-python/organizationHistory/pythonBlameHistoryTree.py
similarity index 100%
rename from organizationHistory/pythonBlameHistoryTree.py
rename to unintegrated-python/organizationHistory/pythonBlameHistoryTree.py
diff --git a/views.py b/unintegrated-python/views.py
similarity index 100%
rename from views.py
rename to unintegrated-python/views.py
| diff --git a/test/test_ghtorrent.py b/test/test_ghtorrent.py
--- a/test/test_ghtorrent.py
+++ b/test/test_ghtorrent.py
@@ -1,6 +1,5 @@
import os
import pytest
-import pandas
@pytest.fixture
def ghtorrent():
@@ -10,10 +9,10 @@ def ghtorrent():
return ghdata.GHTorrent(dbstr)
def test_repoid(ghtorrent):
- assert ghtorrent.repoid('rails', 'rails') == 78852
+ assert ghtorrent.repoid('rails', 'rails') >= 1000
def test_userid(ghtorrent):
- assert ghtorrent.userid('howderek') == 417486
+ assert ghtorrent.userid('howderek') >= 1000
"""
Pandas testing format
@@ -47,7 +46,7 @@ def test_committer_locations(ghtorrent):
assert ghtorrent.committer_locations(ghtorrent.repoid('mavam', 'stat-cookbook')).isin(["Berkeley, CA"]).any
def test_issue_response_time(ghtorrent):
- assert ghtorrent.issue_response_time(ghtorrent.repoid('hadley', 'devtools')).isin(["2013-09-16 17:00:54"]).any
+ assert ghtorrent.issue_response_time(ghtorrent.repoid('hadley', 'devtools')).isin([1]).any
def test_pull_acceptance_rate(ghtorrent):
assert ghtorrent.pull_acceptance_rate(ghtorrent.repoid('akka', 'akka')).isin([0.5]).any
diff --git a/test/test_github.py b/test/test_github.py
--- a/test/test_github.py
+++ b/test/test_github.py
@@ -3,6 +3,20 @@
import pandas
@pytest.fixture
-def publicwww():
- import ghdata
- return ghdata.GitHub(os.getenv("GITHUB_API_KEY"))
\ No newline at end of file
+def github():
+ import ghdata
+ return ghdata.GitHubAPI(os.getenv("GITHUB_API_KEY"))
+
+"""
+Pandas testing format
+
+assert ghtorrent.<function>(ghtorrent.repoid('owner', 'repo')).isin(['<data that should be in dataframe>']).any
+
+The tests check if a value is anywhere in the dataframe
+"""
+
+def test_bus_factor(github):
+ assert github.bus_factor("OSSHealth", "ghdata",start="1-1-17", end="5-12-17").isin(["9"]).any
+
+# def test_tags(github):
+# assert github.tags("OSSHealth", "ghdata").isin(["v0.2"]).any
| Comparisons should be updated with new dates
The date edit seems to only affect the base repository. I would expect it to change the rendering for comparisons as well.
| 2018-01-17T16:56:47Z | [] | [] |
|
chaoss/augur | 161 | chaoss__augur-161 | [
"156"
] | 0d64909e8f9efedfcbeec29ed21ee05bce8c61e0 | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/docs/testing.md b/docs/testing.md\nold mode 100644\nnew mode 100755\n--- a/docs/testin(...TRUNCATED) | "Batch API Logging update\nthe development logger for the Batch API needs to output the underlying e(...TRUNCATED) | 2018-08-27T14:20:23Z | [] | [] |
|
chaoss/augur | 162 | chaoss__augur-162 | [
"156"
] | 0d64909e8f9efedfcbeec29ed21ee05bce8c61e0 | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/docs/testing.md b/docs/testing.md\nold mode 100644\nnew mode 100755\n--- a/docs/testin(...TRUNCATED) | "Batch API Logging update\nthe development logger for the Batch API needs to output the underlying e(...TRUNCATED) | 2018-08-27T23:22:24Z | [] | [] |
|
chaoss/augur | 330 | chaoss__augur-330 | [
"327"
] | 3d435a9a91086952b0704c021fb87a0d45bb4422 | "diff --git a/augur/datasources/augur_db/augur_db.py b/augur/datasources/augur_db/augur_db.py\n--- a(...TRUNCATED) | "diff --git a/augur/datasources/augur_db/test_augur_db.py b/augur/datasources/augur_db/test_augur_db(...TRUNCATED) | "API Endpoint Request\n#### Calculate Contributions and Display Developers Contributing n % of all C(...TRUNCATED) | @sgoggins what do we name this? | 2019-07-21T10:55:25Z | [] | [] |
chaoss/augur | 573 | chaoss__augur-573 | [
"572"
] | ab039945d047e29ea9f096f283508186d83a56b9 | "diff --git a/augur/metrics/pull_request/pull_request.py b/augur/metrics/pull_request/pull_request.p(...TRUNCATED) | "diff --git a/test/api/test_pull_request_routes.py b/test/api/test_pull_request_routes.py\n--- a/tes(...TRUNCATED) | "`pull_request_closed_no_merge` metric is broken\n**Describe the bug:**\r\nThe `pull_request_closed_(...TRUNCATED) | "@ccarterlandis Has this been occurring only for the empty objects?\nI think It's a 404 error :swea(...TRUNCATED) | 2020-03-02T20:44:54Z | [] | [] |
chaoss/augur | 776 | chaoss__augur-776 | [
"737"
] | 1c299b232a46ce1d84090102fcefe4bc97c23c56 | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/test/__init__.py b/augur/routes/metrics/__init__.py\nsimilarity index 100%\nrename fro(...TRUNCATED) | "repo_info worker: dev/test branch\nPlease help us help you by filling out the following sections as(...TRUNCATED) | 2020-06-15T20:11:56Z | [] | [] |
|
chaoss/augur | 781 | chaoss__augur-781 | [
"737"
] | 9086d6df824b31c65c678eb19cef86ecb3052ca5 | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py\ndeleted f(...TRUNCATED) | "repo_info worker: dev/test branch\nPlease help us help you by filling out the following sections as(...TRUNCATED) | 2020-06-17T21:47:52Z | [] | [] |
|
chaoss/augur | 791 | chaoss__augur-791 | [
"737"
] | 7bc330701304d22132f1d95ca326cb18b6988ebf | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py\ndeleted f(...TRUNCATED) | "repo_info worker: dev/test branch\nPlease help us help you by filling out the following sections as(...TRUNCATED) | 2020-06-21T13:54:53Z | [] | [] |
|
chaoss/augur | 792 | chaoss__augur-792 | [
"737"
] | 5f927b73ab4fae059b40f38df7fd9799bfcbd34b | "diff --git a/augur/__init__.py b/augur/__init__.py\n--- a/augur/__init__.py\n+++ b/augur/__init__.p(...TRUNCATED) | "diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py\ndeleted f(...TRUNCATED) | "repo_info worker: dev/test branch\nPlease help us help you by filling out the following sections as(...TRUNCATED) | 2020-06-21T14:51:02Z | [] | [] |
End of preview. Expand
in Dataset Viewer.
SWE-Gym Raw contains 64,689 instances sourced from 358 Python repos.
Most of the instances there doesn't have associated python environment configured and is not validated with SWE-Bench verification process.
If you are working to scale training environments, these instances might be helpful. Otherwise, please take a look at SWE-Gym and SWE-Gym Lite , why are ready to be used for agent training.
Get started at project page github.com/SWE-Gym/SWE-Gym
Repository | Frequency |
---|---|
pandas-dev/pandas | 2328 |
Qiskit/qiskit | 2252 |
dbt-labs/dbt-core | 2029 |
Transpile-AI/ivy | 1550 |
python/mypy | 1186 |
getmoto/moto | 1002 |
modin-project/modin | 994 |
mne-tools/mne-python | 990 |
Project-MONAI/MONAI | 964 |
dask/dask | 921 |
bokeh/bokeh | 892 |
iterative/dvc | 888 |
pylint-dev/pylint | 854 |
conan-io/conan | 754 |
pantsbuild/pants | 732 |
spyder-ide/spyder | 719 |
googleapis/google-cloud-python | 712 |
numba/numba | 698 |
ansible/ansible | 673 |
great-expectations/great_expectations | 645 |
oppia/oppia | 640 |
google/jax | 628 |
saleor/saleor | 615 |
hpcaitech/ColossalAI | 579 |
scipy/scipy | 566 |
statsmodels/statsmodels | 565 |
edgedb/edgedb | 557 |
rotki/rotki | 544 |
pydantic/pydantic | 530 |
tobymao/sqlglot | 523 |
certbot/certbot | 492 |
Lightning-AI/pytorch-lightning | 484 |
ibis-project/ibis | 479 |
cloud-custodian/cloud-custodian | 476 |
python-pillow/Pillow | 467 |
pypa/pip | 461 |
django-cms/django-cms | 430 |
numpy/numpy | 425 |
dask/distributed | 423 |
mesonbuild/meson | 419 |
netbox-community/netbox | 413 |
biolab/orange3 | 410 |
ansible-collections/community.general | 408 |
HypothesisWorks/hypothesis | 408 |
deepset-ai/haystack | 402 |
privacyidea/privacyidea | 402 |
aws-cloudformation/cfn-lint | 398 |
apache/airflow | 380 |
wagtail/wagtail | 379 |
litestar-org/litestar | 377 |
open-telemetry/opentelemetry-python | 369 |
quantumlib/Cirq | 367 |
fossasia/open-event-server | 360 |
freqtrade/freqtrade | 359 |
matrix-org/synapse | 356 |
DataBiosphere/toil | 353 |
nilearn/nilearn | 352 |
nautobot/nautobot | 343 |
pypi/warehouse | 342 |
freedomofpress/securedrop | 338 |
learningequality/kolibri | 338 |
ipython/ipython | 323 |
chainer/chainer | 314 |
zulip/zulip | 306 |
pytorch/ignite | 298 |
encode/django-rest-framework | 296 |
scikit-image/scikit-image | 288 |
sunpy/sunpy | 286 |
pex-tool/pex | 285 |
bridgecrewio/checkov | 283 |
vyperlang/vyper | 280 |
readthedocs/readthedocs.org | 273 |
streamlink/streamlink | 268 |
conan-io/conan-center-index | 264 |
mitmproxy/mitmproxy | 259 |
ckan/ckan | 258 |
aio-libs/aiohttp | 256 |
hedyorg/hedy | 253 |
networkx/networkx | 252 |
psf/black | 251 |
scrapy/scrapy | 248 |
localstack/localstack | 244 |
qutebrowser/qutebrowser | 243 |
python-telegram-bot/python-telegram-bot | 242 |
streamlit/streamlit | 239 |
geopandas/geopandas | 238 |
getsentry/sentry | 232 |
hylang/hy | 231 |
conda/conda | 231 |
cupy/cupy | 221 |
pyca/cryptography | 221 |
Kinto/kinto | 217 |
spack/spack | 206 |
facebookresearch/hydra | 205 |
buildbot/buildbot | 201 |
mlflow/mlflow | 200 |
py-pdf/pypdf | 199 |
xonsh/xonsh | 199 |
googleapis/python-bigquery | 199 |
allenai/allennlp | 197 |
sktime/sktime | 196 |
huggingface/datasets | 194 |
ray-project/ray | 189 |
celery/celery | 188 |
pre-commit/pre-commit | 186 |
cython/cython | 177 |
open-telemetry/opentelemetry-python-contrib | 175 |
wemake-services/wemake-python-styleguide | 175 |
pypa/pipenv | 174 |
pallets/werkzeug | 173 |
docker/docker-py | 171 |
Textualize/textual | 170 |
huggingface/transformers | 162 |
urllib3/urllib3 | 158 |
pypa/setuptools | 158 |
strawberry-graphql/strawberry | 158 |
falconry/falcon | 154 |
nipy/nipype | 154 |
encode/httpx | 154 |
secdev/scapy | 153 |
joke2k/faker | 151 |
liberapay/liberapay.com | 144 |
obspy/obspy | 142 |
internetarchive/openlibrary | 141 |
aws/aws-cli | 141 |
ethereum/web3.py | 139 |
mkdocs/mkdocs | 138 |
pyro-ppl/pyro | 137 |
piskvorky/gensim | 137 |
pallets/click | 136 |
kornia/kornia | 135 |
StackStorm/st2 | 131 |
MongoEngine/mongoengine | 129 |
sanic-org/sanic | 128 |
opsdroid/opsdroid | 128 |
CTFd/CTFd | 127 |
redis/redis-py | 126 |
meltano/meltano | 126 |
PrefectHQ/prefect | 125 |
jupyterhub/jupyterhub | 121 |
kserve/kserve | 120 |
deis/deis | 117 |
tensorflow/addons | 116 |
feast-dev/feast | 115 |
jazzband/pip-tools | 114 |
emissary-ingress/emissary | 114 |
paperless-ngx/paperless-ngx | 114 |
gradio-app/gradio | 112 |
ManimCommunity/manim | 112 |
beetbox/beets | 110 |
python-poetry/poetry | 107 |
gratipay/gratipay.com | 107 |
fonttools/fonttools | 105 |
PyGithub/PyGithub | 105 |
SciTools/cartopy | 104 |
django-oscar/django-oscar | 104 |
biopython/biopython | 104 |
Theano/Theano | 104 |
twisted/twisted | 98 |
kedro-org/kedro | 97 |
tornadoweb/tornado | 97 |
pyinstaller/pyinstaller | 96 |
getredash/redash | 95 |
pyodide/pyodide | 93 |
cocotb/cocotb | 93 |
optuna/optuna | 93 |
cobbler/cobbler | 92 |
kubeflow/pipelines | 91 |
crytic/slither | 91 |
vispy/vispy | 90 |
goauthentik/authentik | 90 |
Pylons/pyramid | 86 |
dmlc/dgl | 86 |
grafana/oncall | 85 |
praw-dev/praw | 85 |
google/flax | 84 |
encode/starlette | 84 |
arviz-devs/arviz | 83 |
psychopy/psychopy | 83 |
frappe/frappe | 82 |
googleapis/google-auth-library-python | 80 |
vacanza/python-holidays | 80 |
plotly/dash | 79 |
qutip/qutip | 78 |
translate/translate | 78 |
keras-team/keras | 78 |
canonical/snapcraft | 77 |
beeware/toga | 73 |
plotly/plotly.py | 73 |
pypa/virtualenv | 72 |
holoviz/panel | 71 |
holoviz/holoviews | 70 |
qtile/qtile | 70 |
boto/botocore | 70 |
horovod/horovod | 70 |
getpelican/pelican | 69 |
huggingface/diffusers | 68 |
mindsdb/mindsdb | 67 |
python-trio/trio | 67 |
openmc-dev/openmc | 67 |
sublimelsp/LSP | 66 |
flairNLP/flair | 65 |
boto/boto | 65 |
cookiecutter/cookiecutter | 65 |
DataDog/dd-agent | 64 |
mosaicml/composer | 64 |
python-discord/bot | 63 |
ludwig-ai/ludwig | 58 |
nltk/nltk | 58 |
dj-stripe/dj-stripe | 57 |
encode/uvicorn | 57 |
ansible/ansible-lint | 56 |
getnikola/nikola | 56 |
vega/altair | 56 |
celery/kombu | 55 |
jazzband/django-debug-toolbar | 52 |
pyqtgraph/pyqtgraph | 52 |
fail2ban/fail2ban | 51 |
wandb/wandb | 51 |
googleapis/google-api-python-client | 51 |
sopel-irc/sopel | 50 |
locustio/locust | 48 |
typeddjango/django-stubs | 48 |
awslabs/gluonts | 48 |
spotify/luigi | 47 |
lk-geimfari/mimesis | 42 |
nonebot/nonebot2 | 42 |
tinygrad/tinygrad | 42 |
google/timesketch | 41 |
apache/tvm | 40 |
open-mmlab/mmdetection | 40 |
OCA/server-tools | 40 |
kivy/kivy | 39 |
CenterForOpenScience/osf.io | 38 |
DataDog/integrations-core | 38 |
benoitc/gunicorn | 37 |
cloudtools/troposphere | 37 |
Textualize/rich | 37 |
aws/aws-sam-cli | 36 |
ytdl-org/youtube-dl | 36 |
pwndbg/pwndbg | 36 |
angr/angr | 35 |
MycroftAI/mycroft-core | 35 |
cookiecutter/cookiecutter-django | 34 |
searx/searx | 34 |
coala/coala | 34 |
vnpy/vnpy | 33 |
prowler-cloud/prowler | 32 |
aio-libs-abandoned/aioredis-py | 32 |
elastic/elasticsearch-py | 31 |
ansible/molecule | 31 |
facebookresearch/ParlAI | 30 |
microsoft/DeepSpeed | 29 |
bentoml/BentoML | 29 |
ethereum/consensus-specs | 28 |
ansible/awx | 27 |
PokemonGoF/PokemonGo-Bot | 27 |
huggingface/accelerate | 27 |
zalando/patroni | 27 |
AnalogJ/lexicon | 26 |
Cog-Creators/Red-DiscordBot | 25 |
pyg-team/pytorch_geometric | 24 |
tiangolo/fastapi | 24 |
autogluon/autogluon | 23 |
lisa-lab/pylearn2 | 23 |
elastic/ecs | 23 |
explosion/spaCy | 23 |
openai/gym | 22 |
ros/ros_comm | 21 |
koxudaxi/datamodel-code-generator | 21 |
flask-admin/flask-admin | 21 |
ManageIQ/integration_tests | 21 |
activeloopai/deeplake | 20 |
electricitymaps/electricitymaps-contrib | 20 |
zigpy/zha-device-handlers | 20 |
pytorch/text | 19 |
kivy/python-for-android | 19 |
yt-dlp/yt-dlp | 19 |
pennersr/django-allauth | 17 |
aws-powertools/powertools-lambda-python | 17 |
systemd/mkosi | 17 |
blakeblackshear/frigate | 16 |
sagemath/sage | 16 |
speechbrain/speechbrain | 16 |
searxng/searxng | 15 |
pyscript/pyscript | 15 |
aws/aws-sdk-pandas | 15 |
PaddlePaddle/models | 14 |
open-mmlab/mmpose | 14 |
open-mmlab/mmcv | 13 |
open-mmlab/mmengine | 13 |
Mailu/Mailu | 13 |
chaoss/augur | 12 |
ktbyers/netmiko | 12 |
aws/serverless-application-model | 12 |
NVIDIA/NeMo | 12 |
OpenNMT/OpenNMT-py | 11 |
PaddlePaddle/PaddleSpeech | 11 |
Chia-Network/chia-blockchain | 10 |
adap/flower | 10 |
diofant/diofant | 10 |
onnx/onnx | 10 |
microsoft/nni | 9 |
pwr-Solaar/Solaar | 9 |
canonical/cloud-init | 9 |
Bitmessage/PyBitmessage | 9 |
castorini/pyserini | 8 |
lutris/lutris | 8 |
mozilla/kitsune | 8 |
InstaPy/InstaPy | 8 |
nerfstudio-project/nerfstudio | 7 |
canonical/microk8s | 7 |
TheAlgorithms/Python | 7 |
cognitedata/cognite-sdk-python | 7 |
lnbits/lnbits | 7 |
facebookresearch/fairseq | 6 |
cowrie/cowrie | 6 |
PaddlePaddle/PaddleNLP | 6 |
open-mmlab/mmdeploy | 5 |
mantl/mantl | 5 |
open-mmlab/mmsegmentation | 5 |
GoogleCloudPlatform/professional-services | 5 |
OctoPrint/OctoPrint | 4 |
python/typeshed | 4 |
quantopian/zipline | 4 |
bramstroker/homeassistant-powercalc | 4 |
ultrabug/py3status | 3 |
magenta/magenta | 3 |
jupyter/docker-stacks | 3 |
google-parfait/tensorflow-federated | 3 |
jupyterhub/zero-to-jupyterhub-k8s | 3 |
borgbackup/borg | 3 |
OWASP/owasp-mastg | 2 |
VATSIM-UK/UK-Sector-File | 2 |
home-assistant/operating-system | 2 |
frappe/hrms | 2 |
kevoreilly/CAPEv2 | 2 |
lra/mackup | 2 |
dcos/dcos | 2 |
ultralytics/yolov5 | 2 |
Pycord-Development/pycord | 2 |
wenet-e2e/wenet | 2 |
mampfes/hacs_waste_collection_schedule | 2 |
amundsen-io/amundsen | 2 |
pyload/pyload | 1 |
openstates/openstates-scrapers | 1 |
Gallopsled/pwntools | 1 |
sherlock-project/sherlock | 1 |
PaddlePaddle/PaddleOCR | 1 |
talonhub/community | 1 |
python/peps | 1 |
AppDaemon/appdaemon | 1 |
openedx-unsupported/configuration | 1 |
conda-forge/staged-recipes | 1 |
triton-inference-server/server | 1 |
mozilla-services/socorro | 1 |
openedx/edx-ora2 | 1 |
kovidgoyal/kitty | 1 |
- Downloads last month
- 9